rlaorrn committed on
Commit
6a026ce
·
verified ·
1 Parent(s): 462ffa0

Training in progress, step 500

Browse files
config.json ADDED
@@ -0,0 +1,118 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "facebook/wav2vec2-base",
3
+ "activation_dropout": 0.0,
4
+ "adapter_attn_dim": null,
5
+ "adapter_kernel_size": 3,
6
+ "adapter_stride": 2,
7
+ "add_adapter": false,
8
+ "apply_spec_augment": true,
9
+ "architectures": [
10
+ "Wav2Vec2ForCTC"
11
+ ],
12
+ "attention_dropout": 0.1,
13
+ "bos_token_id": 1,
14
+ "classifier_proj_size": 256,
15
+ "codevector_dim": 256,
16
+ "contrastive_logits_temperature": 0.1,
17
+ "conv_bias": false,
18
+ "conv_dim": [
19
+ 512,
20
+ 512,
21
+ 512,
22
+ 512,
23
+ 512,
24
+ 512,
25
+ 512
26
+ ],
27
+ "conv_kernel": [
28
+ 10,
29
+ 3,
30
+ 3,
31
+ 3,
32
+ 3,
33
+ 2,
34
+ 2
35
+ ],
36
+ "conv_stride": [
37
+ 5,
38
+ 2,
39
+ 2,
40
+ 2,
41
+ 2,
42
+ 2,
43
+ 2
44
+ ],
45
+ "ctc_loss_reduction": "sum",
46
+ "ctc_zero_infinity": false,
47
+ "diversity_loss_weight": 0.1,
48
+ "do_stable_layer_norm": false,
49
+ "eos_token_id": 2,
50
+ "feat_extract_activation": "gelu",
51
+ "feat_extract_norm": "group",
52
+ "feat_proj_dropout": 0.1,
53
+ "feat_quantizer_dropout": 0.0,
54
+ "final_dropout": 0.0,
55
+ "freeze_feat_extract_train": true,
56
+ "hidden_act": "gelu",
57
+ "hidden_dropout": 0.1,
58
+ "hidden_size": 768,
59
+ "initializer_range": 0.02,
60
+ "intermediate_size": 3072,
61
+ "layer_norm_eps": 1e-05,
62
+ "layerdrop": 0.0,
63
+ "mask_channel_length": 10,
64
+ "mask_channel_min_space": 1,
65
+ "mask_channel_other": 0.0,
66
+ "mask_channel_prob": 0.0,
67
+ "mask_channel_selection": "static",
68
+ "mask_feature_length": 10,
69
+ "mask_feature_min_masks": 0,
70
+ "mask_feature_prob": 0.0,
71
+ "mask_time_length": 10,
72
+ "mask_time_min_masks": 2,
73
+ "mask_time_min_space": 1,
74
+ "mask_time_other": 0.0,
75
+ "mask_time_prob": 0.05,
76
+ "mask_time_selection": "static",
77
+ "model_type": "wav2vec2",
78
+ "no_mask_channel_overlap": false,
79
+ "no_mask_time_overlap": false,
80
+ "num_adapter_layers": 3,
81
+ "num_attention_heads": 12,
82
+ "num_codevector_groups": 2,
83
+ "num_codevectors_per_group": 320,
84
+ "num_conv_pos_embedding_groups": 16,
85
+ "num_conv_pos_embeddings": 128,
86
+ "num_feat_extract_layers": 7,
87
+ "num_hidden_layers": 12,
88
+ "num_negatives": 100,
89
+ "output_hidden_size": 768,
90
+ "pad_token_id": 0,
91
+ "proj_codevector_dim": 256,
92
+ "tdnn_dilation": [
93
+ 1,
94
+ 2,
95
+ 3,
96
+ 1,
97
+ 1
98
+ ],
99
+ "tdnn_dim": [
100
+ 512,
101
+ 512,
102
+ 512,
103
+ 512,
104
+ 1500
105
+ ],
106
+ "tdnn_kernel": [
107
+ 5,
108
+ 3,
109
+ 3,
110
+ 1,
111
+ 1
112
+ ],
113
+ "torch_dtype": "float32",
114
+ "transformers_version": "4.39.3",
115
+ "use_weighted_layer_sum": false,
116
+ "vocab_size": 800,
117
+ "xvector_output_dim": 512
118
+ }
model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ffef46ba0d91b26720ade5a0764b8eb00dd4cd8b026b05ad7fdf73b5bba92b4e
3
+ size 377611120
preprocessor_config.json ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "do_normalize": true,
3
+ "feature_extractor_type": "Wav2Vec2FeatureExtractor",
4
+ "feature_size": 1,
5
+ "padding_side": "right",
6
+ "padding_value": 0.0,
7
+ "return_attention_mask": false,
8
+ "sampling_rate": 16000
9
+ }
runs/May24_20-24-49_890e8b3ca76b/events.out.tfevents.1716582397.890e8b3ca76b.34.0 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2ac75e4115e01a183622b5727806489948ea4e87bf47a2c8f2b5e1a6c5093191
3
+ size 6307
runs/May24_20-31-07_890e8b3ca76b/events.out.tfevents.1716582710.890e8b3ca76b.34.1 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:60e1e37f65bdcdc419a35133bd27dc8b7b05882f02164c87954e2907a0730392
3
+ size 6307
runs/May24_20-34-06_890e8b3ca76b/events.out.tfevents.1716582886.890e8b3ca76b.34.2 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1ad92b4a9897c1c7d2d22b7271691629c322b0f513667db5be7bde20694cc1ac
3
+ size 6307
runs/May24_20-40-48_890e8b3ca76b/events.out.tfevents.1716583290.890e8b3ca76b.34.3 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6420e7a899ff2207f5231157234ae31754f5ffed0d5dbd936bd905f10d3b0e25
3
+ size 6307
runs/May24_20-42-26_890e8b3ca76b/events.out.tfevents.1716583387.890e8b3ca76b.34.4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e83eaf9821d5367c3e81a47f83ef3e3da22c3eb4e21e6a5edaa39136b4cd55fb
3
+ size 6307
runs/May24_20-48-13_890e8b3ca76b/events.out.tfevents.1716583720.890e8b3ca76b.34.5 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5809329e6401c44df0ad3d49be3fd6b3e399b208768feecbf1e0e9f85d6c2341
3
+ size 19288
training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:141b8c574fad469c69f1096f214b83f7c7364266f51555c4dd421e97e70dca21
3
+ size 4920
vocab.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"\uc644": 0, "\uc800": 1, "\ub797": 2, "\ucabd": 3, "\uc5c7": 4, "\ubbf8": 5, "\uc7a0": 6, "\ub17c": 7, "\uac9f": 8, "\ucef7": 9, "\uba4b": 10, "\ub611": 11, "\ub534": 12, "\ube60": 13, "\ub189": 14, "\uc090": 15, "\ub36e": 16, "\uafb9": 17, "\ub370": 18, "\ud138": 19, "\uaf3c": 20, "\ub5a0": 21, "\uce61": 22, "\ud480": 23, "\uc70c": 24, "\uac74": 25, "\uc2f6": 26, "\uad75": 27, "\ubb54": 28, "\ud604": 29, "\uc798": 30, "\ube68": 31, "\uae4d": 32, "\uac70": 33, "\ub139": 34, "\ub108": 35, "\ub128": 36, "\ucd0c": 37, "\ub9ce": 38, "\ub518": 39, "\ucda9": 40, "\uc794": 41, "\ub9db": 42, "\uba69": 43, "\ub538": 44, "\uc811": 45, "\ub9c8": 46, "\ud55c": 47, "\uc90d": 48, "\ub1a7": 49, "\ubab0": 50, "\uc601": 51, "\uc5f0": 52, "\ub2f9": 53, "\ub123": 54, "\uc12c": 55, "\uc27d": 56, "\ud615": 57, "\ub04c": 58, "\ucc38": 59, "\ud31c": 60, "\ucd95": 61, "\uc500": 62, "\uc694": 63, "\uadc0": 64, "\ucce5": 65, "\ud504": 66, "\uc5d4": 67, "\uacac": 68, "\uc80a": 69, "\uc78e": 70, "\ud0c1": 71, "\ub2ee": 72, "\ucf54": 73, "\uc5d0": 74, "\uca6c": 75, "\uae08": 76, "\uc5c8": 77, "\ub77d": 78, "\ud6fc": 79, "\ucc44": 80, "\ub798": 81, "\ub4f1": 82, "\uac16": 83, "\ubc84": 84, "\ub728": 85, "\ubd09": 86, "\uafb8": 87, "\ubd14": 88, "\uac90": 89, "\ud5e8": 90, "\uba64": 91, "\uacc4": 92, "\uc4f8": 93, "\uae40": 94, "\uadfc": 95, "\ubcbd": 96, "\ub9bd": 97, "\uc608": 98, "\ub978": 99, "\ub0ad": 100, "\ucc98": 101, "\uc369": 102, "\uc820": 103, "\uba4d": 104, "\uc82f": 105, "\ub461": 106, "\ud2c0": 107, "\ucbe4": 108, "\ubbfc": 109, "\ubc00": 110, "\uc92d": 111, "\uc058": 112, "\uc0c9": 113, "\uac1d": 114, "\uba3c": 115, "\ub0d0": 116, "\ub499": 117, "\uac10": 118, "\ud1a0": 119, "\uaf48": 120, "\uaca1": 121, "\uc9f8": 122, "\uace4": 123, "\ub298": 124, "\ub0b8": 125, "\uac1c": 126, "\uba55": 127, "\ub460": 128, "\uad7d": 129, "\ub35c": 130, "\ub2d0": 131, "\uc464": 132, "\uc88b": 133, "\ub9dd": 134, "\ubb58": 135, "\ud63c": 136, "\ud31f": 137, "\ud5d0": 138, "\uc57d": 139, 
"\ud0c7": 140, "\ub625": 141, "\uce68": 142, "\ub374": 143, "\uc368": 144, "\ud070": 145, "\ubc29": 146, "\ubc1f": 147, "\uacaa": 148, "\ud310": 149, "\uc78a": 150, "\ub07c": 151, "\uce59": 152, "\ub8e9": 153, "\ud45c": 154, "\ud15d": 155, "\ub0c9": 156, "\uc904": 157, "\uadf8": 158, "\uad70": 159, "\ud5f4": 160, "\uc2eb": 161, "\uc2ac": 162, "\uac13": 163, "\uc8fd": 164, "\uad6c": 165, "\uc815": 166, "\uace1": 167, "\ub364": 168, "\uae68": 169, "\ub8fd": 170, "\uae50": 171, "\ub9b0": 172, "\ubb63": 173, "\ud0c4": 174, "\ub0a0": 175, "\uc5c5": 176, "\ub118": 177, "\uce60": 178, "\uc6b8": 179, "\ub791": 180, "\ubc8b": 181, "\ubcbc": 182, "\ub3cc": 183, "\uac8c": 184, "\uc5d8": 185, "\uacf0": 186, "\ub141": 187, "\ud758": 188, "\ube44": 189, "\ubb38": 190, "\ubcc4": 191, "\uafe9": 192, "\uae5c": 193, "\uad00": 194, "\ud3b8": 195, "\ub0ab": 196, "\ud3c9": 197, "\uce69": 198, "\uc797": 199, "\ud1b5": 200, "\ub290": 201, "\ubb18": 202, "\ud751": 203, "\uad7c": 204, "\ud074": 205, "\uc557": 206, "\uc190": 207, "\uc84b": 208, "\uaf2d": 209, ",": 210, "\uc911": 211, "\ubcfc": 212, "\uccd0": 213, "\uaddc": 214, "\ud314": 215, "\uc0c1": 216, "\uc2ed": 217, "\uc13c": 218, "\uc5bc": 219, "\ubb3b": 220, "\uc2b7": 221, "\uba65": 222, "\uc370": 223, "\ub51c": 224, "\uac78": 225, "\ub8cc": 226, "\ub2f5": 227, "\ub837": 228, "\uc790": 229, "\ub4f8": 230, "\ud321": 231, "\ubc8c": 232, "\uacf5": 233, "\ud0dc": 234, "\uba40": 235, "\ubd10": 236, "\uba38": 237, "\ubcf5": 238, "\ub7ff": 239, "\ub985": 240, "\ubeff": 241, "\ubc18": 242, "\uce78": 243, "\uc2ec": 244, "\ud48d": 245, "\uc18c": 246, "\ub2ff": 247, "\ucfe0": 248, "\uba39": 249, "\ub9b4": 250, "\uc77c": 251, "\ub808": 252, "\ub9bc": 253, "\ucc28": 254, "\ud154": 255, "\ub531": 256, "\uc384": 257, "\ub801": 258, "\ub418": 259, "\uad6d": 260, "\ub5b5": 261, "\ud130": 262, "\ud6a8": 263, "\uae38": 264, "\uc591": 265, "\uc218": 266, "\uc554": 267, "\uc124": 268, "\uc2f8": 269, "\ub7ec": 270, "\ub458": 271, "\ubc88": 272, "\uad74": 
273, "\ub110": 274, "\ucd94": 275, "\ubabb": 276, "\uc0bc": 277, "\ub828": 278, "\ub46c": 279, "\ud37c": 280, "\uae5f": 281, "\uc9d1": 282, "\uc0cc": 283, "\uc606": 284, "\ud788": 285, "\uaca0": 286, "\uadf9": 287, "\ub97c": 288, "\uc704": 289, "\uacfc": 290, "\uc6b4": 291, "\ubcc0": 292, "\ube48": 293, "\uce74": 294, "\uc2dc": 295, "\uac00": 296, "\uc584": 297, "\ub550": 298, "\ub4ec": 299, "\ud5d8": 300, "\ub488": 301, "\uaf2c": 302, "\uc788": 303, "\ubaa8": 304, "\ud638": 305, "\uc2f1": 306, "\ub7c9": 307, "\uba67": 308, "\uaed1": 309, "\ub148": 310, "\uc14b": 311, "\ucf00": 312, "\uaf34": 313, "\ud280": 314, "\uc624": 315, "\ube57": 316, "\ud478": 317, "\ubb47": 318, "\uc2f9": 319, "\ubc25": 320, "\uc2a8": 321, "\uba70": 322, "\uc90f": 323, "\uada4": 324, "\uac11": 325, "\ud6cd": 326, "\ub179": 327, "\uc0b4": 328, "\ub4e4": 329, "\uc831": 330, "\ud559": 331, "\ud230": 332, "\uc52c": 333, "\uc838": 334, "\uc796": 335, "\ub150": 336, "\uc2a4": 337, "\ub048": 338, "\uc824": 339, "\ud5f7": 340, "\ucee4": 341, "\ub974": 342, "\ucc45": 343, "\ub151": 344, "\uc4f0": 345, "\ub2cc": 346, "\uc73c": 347, "\uc37b": 348, "\uaef4": 349, "\uce5c": 350, "\ubf51": 351, "\uc131": 352, "\ub0e5": 353, "\ub214": 354, "\uc5b5": 355, "\uc22b": 356, "\uacbd": 357, "\ub4a4": 358, "\ucf20": 359, "\ubabd": 360, "\uad58": 361, "\ub2f4": 362, "\uc881": 363, "\uc81c": 364, "\uae14": 365, "\uc548": 366, "\uc880": 367, "\uc740": 368, "\uccd4": 369, "\ub2ec": 370, "\uc0db": 371, "\ub530": 372, "\uc1a1": 373, "\ub840": 374, "?": 375, "\ub780": 376, "\ub9e4": 377, "\ucd5c": 378, "\uc83c": 379, "\ub807": 380, "\ub465": 381, "\uc538": 382, "\ub3c4": 383, "\uae30": 384, "\uc220": 385, "\ub8e8": 386, "\ub860": 387, "\uccad": 388, "\uc9c0": 389, "\ub0b3": 390, "\ud574": 391, "\uc545": 392, "\uace8": 393, "\ub824": 394, "\ud569": 395, "\uc14d": 396, "\ubfdc": 397, "\ubcd1": 398, "\ub481": 399, "\uc2c0": 400, "\ubb50": 401, "\ub4fc": 402, "\ud30c": 403, "\ub198": 404, "\ub355": 405, "\ub057": 406, 
"\uc6a9": 407, "\ub354": 408, "\ub178": 409, "\uae34": 410, "\ub5a8": 411, "\ubca1": 412, "\uad7f": 413, "\uc530": 414, "\uc54a": 415, "\ubcb5": 416, "\uc6c3": 417, "\uc870": 418, "\ud22c": 419, "\uba87": 420, "\uad50": 421, "\ud2b8": 422, "\ub871": 423, "\ubd84": 424, "\ub144": 425, "\ub420": 426, "\ub834": 427, "\uba74": 428, "\uc49c": 429, "\ud5f9": 430, "\uaecf": 431, "\ubc97": 432, "\ud68c": 433, "\ub2e8": 434, "\uc758": 435, "\uac19": 436, "\ubfcc": 437, "\uc6d0": 438, "\ucf69": 439, "\ub825": 440, "\ucc29": 441, "\uc9c8": 442, "\uc2e4": 443, "\ub294": 444, "\uc998": 445, "\uc778": 446, "\uacb8": 447, "\uaf07": 448, "\uc934": 449, "\ud2bf": 450, "\ud734": 451, "\uacb0": 452, "\ub839": 453, "\uacbb": 454, "\uccb4": 455, "\ub958": 456, "\ub7b5": 457, "\ubb34": 458, "\ud06c": 459, "\uc094": 460, "\ucef4": 461, "\uce6d": 462, "\uc9e4": 463, "\ub7f0": 464, "\ud544": 465, "\ubaa9": 466, "\ub9c9": 467, "\ub2a0": 468, "\ub9bf": 469, "\ub85d": 470, "\ub358": 471, "\uc544": 472, "\uaca9": 473, "\uc5f4": 474, "\ud655": 475, "\ub2e4": 476, "\ubc15": 477, "\uc9c1": 478, "\uaf3d": 479, "\uc313": 480, "\ub044": 481, "\ud5d9": 482, "\ucb64": 483, "\ub77c": 484, "\ub9cc": 485, "\ub274": 486, "\uc8fc": 487, "\ucc9c": 488, "\uc120": 489, "\ubc16": 490, "\uc5fc": 491, "\uc2b9": 492, "\ud5e4": 493, "\ub098": 494, "\ucc2e": 495, "\ubd05": 496, "\uc0dd": 497, "\ud640": 498, "\ub4b7": 499, "\ucabc": 500, "\uac83": 501, "\ud1f4": 502, "\uccb8": 503, "\ub299": 504, "\uacf1": 505, "\uc37d": 506, "\ub300": 507, "\ubc11": 508, "\ud6c4": 509, "\uba58": 510, "\uaff0": 511, "\ud5c8": 512, "\uacf3": 513, "\uc653": 514, "\uc751": 515, "\uc5e3": 516, "\uc784": 517, "\uca99": 518, "\uc7a5": 519, "\uc871": 520, "\ub194": 521, "\uade4": 522, "\uaed8": 523, "\ub73b": 524, "\uc158": 525, "\ub054": 526, "\ub4e0": 527, "\uc640": 528, "\ubb35": 529, "\uc57c": 530, "\ubc14": 531, "\ubcf4": 532, "\uc11d": 533, "\uac77": 534, "\uc5c6": 535, "\ucc1d": 536, "\ucd10": 537, "\ub81b": 538, "\ud65c": 539, 
"\ud0a4": 540, "\ub878": 541, "\ub4dc": 542, "\ub2b4": 543, "\ub3c5": 544, "\uc90c": 545, "\ud654": 546, "\uc60c": 547, "\uc918": 548, "\ud2f0": 549, "\uc75c": 550, "\ub5a1": 551, "\uc2dd": 552, "\uc529": 553, "\ubd81": 554, "\ucd9c": 555, "\uc74d": 556, "\ud568": 557, "\ud07c": 558, "\ub9ac": 559, "\uc988": 560, "\uc5c9": 561, "\uac15": 562, "\ub78c": 563, "\ubd90": 564, "\uc628": 565, "\uc300": 566, "\uba54": 567, "\ub2a6": 568, "\ud78c": 569, "\ud56d": 570, "\ud134": 571, "\uc0ac": 572, "\ub124": 573, "\ub369": 574, "\ub193": 575, "\u1161": 576, "\uc2b5": 577, "\uc167": 578, "\ub80c": 579, "\ub1a9": 580, "\uc7ac": 581, "\ub153": 582, "\uac24": 583, "\ubcb3": 584, "\ud1a7": 585, "\ubc24": 586, "\ud6c8": 587, "\uc61b": 588, "\ubd88": 589, "\uc6b1": 590, "\uc874": 591, "\uc6d4": 592, "\ub7fc": 593, "\uc785": 594, "\uc54c": 595, "\ube4c": 596, "\ub490": 597, "\ubed0": 598, "\ucd98": 599, "\u11af": 600, "\uc720": 601, "\ub47f": 602, "\ub1a4": 603, "\uc804": 604, "\uc816": 605, "\uba85": 606, "\ud488": 607, "\ubc85": 608, "\uc11c": 609, "\ud798": 610, "\uc250": 611, "\uc9c7": 612, "\uc55a": 613, "\ud0d1": 614, "\uac85": 615, "\uc721": 616, "\ud611": 617, "\uc74c": 618, "\uc30d": 619, "\uc598": 620, "\uc744": 621, "\uc774": 622, "\ub5bb": 623, "\uc791": 624, ".": 625, "\ub220": 626, "\ubca8": 627, "\uc495": 628, "\uacc1": 629, "\ub9f7": 630, "\ud3ec": 631, "\uc55e": 632, "\uac08": 633, "\ubbff": 634, "\uc5b8": 635, "\uace0": 636, "\ub3d9": 637, "\uafc0": 638, "\ubd25": 639, "\ud2b9": 640, "\ub8d0": 641, "\ucc30": 642, "\uac2f": 643, "\ub9de": 644, "\ub3fc": 645, "\uc7c1": 646, "\ubca0": 647, "\ub09c": 648, "\uae4c": 649, "\ud639": 650, "\ubc27": 651, "\ub78f": 652, "\ubcf8": 653, "\ucb49": 654, "\uc9d3": 655, "\uad82": 656, "\uc9f1": 657, "\ubc94": 658, "\uc787": 659, "\ubc95": 660, "\ud2bc": 661, "\ub20c": 662, "\uc5ec": 663, "\uc62e": 664, "\uc5b4": 665, "\ub984": 666, "\ud53c": 667, "\uac04": 668, "\ubd99": 669, "\ud14c": 670, "\ub9d0": 671, "\ud560": 672, "\ucca0": 
673, "\uc138": 674, "\uac07": 675, "\uc5bd": 676, "\u1101": 677, "\uc50c": 678, "\uc6c5": 679, "\uc18d": 680, "\uc528": 681, "\ubc30": 682, "\uc559": 683, "\uc6e8": 684, "\ub2c8": 685, "\ub155": 686, "\ub9e8": 687, "\uc6b0": 688, "\ud750": 689, "\uc560": 690, "\uc5ed": 691, "\ub140": 692, "\uae61": 693, "\uc5ff": 694, "\ub54c": 695, "\ub7fd": 696, "\uc65c": 697, "\ubc1c": 698, "\uc84d": 699, "\ub137": 700, "\uc808": 701, "\ub0a8": 702, "\uca4c": 703, "\uc9d0": 704, "\ub055": 705, "\uad11": 706, "\uc580": 707, "\uc637": 708, "\ub0ae": 709, "\uc775": 710, "\ucf1c": 711, "\uca0b": 712, "\uc5e5": 713, "\uc678": 714, "\ub2d9": 715, "\ubd80": 716, "\uc26c": 717, "\ud5cc": 718, "\ud76c": 719, "\ud0d3": 720, "\ubb3c": 721, "\ub835": 722, "\uae00": 723, "\ub10c": 724, "\ub0bc": 725, "\uac01": 726, "\uc99d": 727, "\ubfd4": 728, "\ub35f": 729, "\ubd04": 730, "\uc2e0": 731, "\uc813": 732, "\ub610": 733, "\ub85c": 734, "\ud754": 735, "\uce6b": 736, "\ud0c0": 737, "\uc12f": 738, "\uac20": 739, "\uc9dd": 740, "\uaca8": 741, "\uae54": 742, "\uc655": 743, "\ub204": 744, "\ub498": 745, "\ub545": 746, "\ubc31": 747, "\ub5a4": 748, "\ub0b4": 749, "\ub385": 750, "\uc9c4": 751, "\ub2d8": 752, "\ub188": 753, "\ub3c8": 754, "\ub48c": 755, "\ub180": 756, "\ub05d": 757, "\ucacc": 758, "\uc21c": 759, "\uc0c8": 760, "\uc6c0": 762, "\uac81": 763, "\uc989": 764, "\ub450": 765, "\ud034": 766, "\uc0b0": 767, "\ud391": 768, "\ucc3e": 769, "\ub4ef": 770, "\ub81d": 771, "\uad81": 772, "\ub72c": 773, "\ucc0d": 774, "\ucc2c": 775, "\ubfd0": 776, "\ub2e5": 777, "\uc900": 778, "\uc885": 779, "\ubbc4": 780, "\uc6c1": 781, "\uac80": 782, "\ubc1b": 783, "\ud150": 784, "\ub18d": 785, "\ud2c8": 786, "\uc5bb": 787, "\uaebc": 788, "\ud558": 789, "\uce58": 790, "\ub514": 791, "\uc62c": 792, "\ucd08": 793, "\ubd23": 794, "\uc801": 795, "\uba83": 796, "\ub7ed": 797, "|": 761, "[UNK]": 798, "[PAD]": 799}
wandb/debug-internal.log ADDED
The diff for this file is too large to render. See raw diff
 
wandb/debug.log ADDED
@@ -0,0 +1,217 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ 2024-05-24 20:27:37,417 INFO MainThread:34 [wandb_setup.py:_flush():76] Current SDK version is 0.16.6
2
+ 2024-05-24 20:27:37,417 INFO MainThread:34 [wandb_setup.py:_flush():76] Configure stats pid to 34
3
+ 2024-05-24 20:27:37,417 INFO MainThread:34 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings
4
+ 2024-05-24 20:27:37,417 INFO MainThread:34 [wandb_setup.py:_flush():76] Loading settings from /kaggle/working/wandb/settings
5
+ 2024-05-24 20:27:37,417 INFO MainThread:34 [wandb_setup.py:_flush():76] Loading settings from environment variables: {}
6
+ 2024-05-24 20:27:37,417 INFO MainThread:34 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False}
7
+ 2024-05-24 20:27:37,417 INFO MainThread:34 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program': '<python with no main file>'}
8
+ 2024-05-24 20:27:37,417 INFO MainThread:34 [wandb_setup.py:_flush():76] Applying login settings: {}
9
+ 2024-05-24 20:27:37,417 INFO MainThread:34 [wandb_setup.py:_flush():76] Applying login settings: {'api_key': '***REDACTED***'}
10
+ 2024-05-24 20:27:37,417 INFO MainThread:34 [wandb_init.py:_log_setup():521] Logging user logs to /kaggle/working/wandb/run-20240524_202737-n1w0kmmv/logs/debug.log
11
+ 2024-05-24 20:27:37,417 INFO MainThread:34 [wandb_init.py:_log_setup():522] Logging internal logs to /kaggle/working/wandb/run-20240524_202737-n1w0kmmv/logs/debug-internal.log
12
+ 2024-05-24 20:27:37,417 INFO MainThread:34 [wandb_init.py:_jupyter_setup():467] configuring jupyter hooks <wandb.sdk.wandb_init._WandbInit object at 0x785a802b9120>
13
+ 2024-05-24 20:27:37,418 INFO MainThread:34 [wandb_init.py:init():561] calling init triggers
14
+ 2024-05-24 20:27:37,418 INFO MainThread:34 [wandb_init.py:init():568] wandb.init called with sweep_config: {}
15
+ config: {}
16
+ 2024-05-24 20:27:37,418 INFO MainThread:34 [wandb_init.py:init():611] starting backend
17
+ 2024-05-24 20:27:37,418 INFO MainThread:34 [wandb_init.py:init():615] setting up manager
18
+ 2024-05-24 20:27:37,420 INFO MainThread:34 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn
19
+ 2024-05-24 20:27:37,423 INFO MainThread:34 [wandb_init.py:init():623] backend started and connected
20
+ 2024-05-24 20:27:37,434 INFO MainThread:34 [wandb_run.py:_label_probe_notebook():1299] probe notebook
21
+ 2024-05-24 20:27:38,191 INFO MainThread:34 [wandb_init.py:init():715] updated telemetry
22
+ 2024-05-24 20:27:38,195 INFO MainThread:34 [wandb_init.py:init():748] communicating run to backend with 90.0 second timeout
23
+ 2024-05-24 20:27:38,448 INFO MainThread:34 [wandb_run.py:_on_init():2357] communicating current version
24
+ 2024-05-24 20:27:38,512 INFO MainThread:34 [wandb_run.py:_on_init():2366] got version response upgrade_message: "wandb version 0.17.0 is available! To upgrade, please run:\n $ pip install wandb --upgrade"
25
+
26
+ 2024-05-24 20:27:38,514 INFO MainThread:34 [wandb_init.py:init():799] starting run threads in backend
27
+ 2024-05-24 20:27:54,627 INFO MainThread:34 [wandb_run.py:_console_start():2335] atexit reg
28
+ 2024-05-24 20:27:54,628 INFO MainThread:34 [wandb_run.py:_redirect():2190] redirect: wrap_raw
29
+ 2024-05-24 20:27:54,628 INFO MainThread:34 [wandb_run.py:_redirect():2255] Wrapping output streams.
30
+ 2024-05-24 20:27:54,629 INFO MainThread:34 [wandb_run.py:_redirect():2280] Redirects installed.
31
+ 2024-05-24 20:27:54,630 INFO MainThread:34 [wandb_init.py:init():842] run started, returning control to user process
32
+ 2024-05-24 20:27:54,637 INFO MainThread:34 [wandb_run.py:_config_callback():1347] config_cb None None {'return_dict': True, 'output_hidden_states': False, 'output_attentions': False, 'torchscript': False, 'torch_dtype': None, 'use_bfloat16': False, 'tf_legacy_loss': False, 'pruned_heads': {}, 'tie_word_embeddings': True, 'chunk_size_feed_forward': 0, 'is_encoder_decoder': False, 'is_decoder': False, 'cross_attention_hidden_size': None, 'add_cross_attention': False, 'tie_encoder_decoder': False, 'max_length': 20, 'min_length': 0, 'do_sample': False, 'early_stopping': False, 'num_beams': 1, 'num_beam_groups': 1, 'diversity_penalty': 0.0, 'temperature': 1.0, 'top_k': 50, 'top_p': 1.0, 'typical_p': 1.0, 'repetition_penalty': 1.0, 'length_penalty': 1.0, 'no_repeat_ngram_size': 0, 'encoder_no_repeat_ngram_size': 0, 'bad_words_ids': None, 'num_return_sequences': 1, 'output_scores': False, 'return_dict_in_generate': False, 'forced_bos_token_id': None, 'forced_eos_token_id': None, 'remove_invalid_values': False, 'exponential_decay_length_penalty': None, 'suppress_tokens': None, 'begin_suppress_tokens': None, 'architectures': ['Wav2Vec2ForPreTraining'], 'finetuning_task': None, 'id2label': {0: 'LABEL_0', 1: 'LABEL_1'}, 'label2id': {'LABEL_0': 0, 'LABEL_1': 1}, 'tokenizer_class': None, 'prefix': None, 'bos_token_id': 1, 'pad_token_id': 0, 'eos_token_id': 2, 'sep_token_id': None, 'decoder_start_token_id': None, 'task_specific_params': None, 'problem_type': None, '_name_or_path': 'facebook/wav2vec2-base', 'transformers_version': '4.39.3', 'freeze_feat_extract_train': True, 'mask_channel_length': 10, 'mask_channel_min_space': 1, 'mask_channel_other': 0.0, 'mask_channel_prob': 0.0, 'mask_channel_selection': 'static', 'mask_time_min_space': 1, 'mask_time_other': 0.0, 'mask_time_selection': 'static', 'model_type': 'wav2vec2', 'no_mask_channel_overlap': False, 'no_mask_time_overlap': False, 'num_feat_extract_layers': 7, 'hidden_size': 768, 'feat_extract_norm': 'group', 
'feat_extract_activation': 'gelu', 'conv_dim': [512, 512, 512, 512, 512, 512, 512], 'conv_stride': [5, 2, 2, 2, 2, 2, 2], 'conv_kernel': [10, 3, 3, 3, 3, 2, 2], 'conv_bias': False, 'num_conv_pos_embeddings': 128, 'num_conv_pos_embedding_groups': 16, 'num_hidden_layers': 12, 'intermediate_size': 3072, 'hidden_act': 'gelu', 'num_attention_heads': 12, 'hidden_dropout': 0.1, 'attention_dropout': 0.1, 'activation_dropout': 0.0, 'feat_proj_dropout': 0.1, 'final_dropout': 0.0, 'layerdrop': 0.0, 'layer_norm_eps': 1e-05, 'initializer_range': 0.02, 'vocab_size': 32, 'do_stable_layer_norm': False, 'use_weighted_layer_sum': False, 'apply_spec_augment': True, 'mask_time_prob': 0.05, 'mask_time_length': 10, 'mask_time_min_masks': 2, 'mask_feature_prob': 0.0, 'mask_feature_length': 10, 'mask_feature_min_masks': 0, 'num_codevectors_per_group': 320, 'num_codevector_groups': 2, 'contrastive_logits_temperature': 0.1, 'feat_quantizer_dropout': 0.0, 'num_negatives': 100, 'codevector_dim': 256, 'proj_codevector_dim': 256, 'diversity_loss_weight': 0.1, 'ctc_loss_reduction': 'sum', 'ctc_zero_infinity': False, 'add_adapter': False, 'adapter_kernel_size': 3, 'adapter_stride': 2, 'num_adapter_layers': 3, 'output_hidden_size': 768, 'adapter_attn_dim': None, 'classifier_proj_size': 256, 'tdnn_dim': [512, 512, 512, 512, 1500], 'tdnn_kernel': [5, 3, 3, 1, 1], 'tdnn_dilation': [1, 2, 3, 1, 1], 'xvector_output_dim': 512, 'output_dir': '/kaggle/working/', 'overwrite_output_dir': False, 'do_train': False, 'do_eval': True, 'do_predict': False, 'evaluation_strategy': 'steps', 'prediction_loss_only': False, 'per_device_train_batch_size': 32, 'per_device_eval_batch_size': 8, 'per_gpu_train_batch_size': None, 'per_gpu_eval_batch_size': None, 'gradient_accumulation_steps': 1, 'eval_accumulation_steps': None, 'eval_delay': 0, 'learning_rate': 0.0001, 'weight_decay': 0.005, 'adam_beta1': 0.9, 'adam_beta2': 0.999, 'adam_epsilon': 1e-08, 'max_grad_norm': 1.0, 'num_train_epochs': 30, 'max_steps': -1, 
'lr_scheduler_type': 'linear', 'lr_scheduler_kwargs': {}, 'warmup_ratio': 0.0, 'warmup_steps': 1000, 'log_level': 'passive', 'log_level_replica': 'warning', 'log_on_each_node': True, 'logging_dir': '/kaggle/working/runs/May24_20-24-49_890e8b3ca76b', 'logging_strategy': 'steps', 'logging_first_step': False, 'logging_steps': 500, 'logging_nan_inf_filter': True, 'save_strategy': 'steps', 'save_steps': 500, 'save_total_limit': 2, 'save_safetensors': True, 'save_on_each_node': False, 'save_only_model': False, 'no_cuda': False, 'use_cpu': False, 'use_mps_device': False, 'seed': 42, 'data_seed': None, 'jit_mode_eval': False, 'use_ipex': False, 'bf16': False, 'fp16': True, 'fp16_opt_level': 'O1', 'half_precision_backend': 'auto', 'bf16_full_eval': False, 'fp16_full_eval': False, 'tf32': None, 'local_rank': 0, 'ddp_backend': None, 'tpu_num_cores': None, 'tpu_metrics_debug': False, 'debug': [], 'dataloader_drop_last': False, 'eval_steps': 500, 'dataloader_num_workers': 0, 'dataloader_prefetch_factor': None, 'past_index': -1, 'run_name': '/kaggle/working/', 'disable_tqdm': False, 'remove_unused_columns': True, 'label_names': None, 'load_best_model_at_end': False, 'metric_for_best_model': None, 'greater_is_better': None, 'ignore_data_skip': False, 'fsdp': [], 'fsdp_min_num_params': 0, 'fsdp_config': {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, 'fsdp_transformer_layer_cls_to_wrap': None, 'accelerator_config': {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True}, 'deepspeed': None, 'label_smoothing_factor': 0.0, 'optim': 'adamw_torch', 'optim_args': None, 'adafactor': False, 'group_by_length': True, 'length_column_name': 'length', 'report_to': ['tensorboard', 'wandb'], 'ddp_find_unused_parameters': None, 'ddp_bucket_cap_mb': None, 'ddp_broadcast_buffers': None, 'dataloader_pin_memory': True, 'dataloader_persistent_workers': False, 'skip_memory_metrics': True, 
'use_legacy_prediction_loop': False, 'push_to_hub': True, 'resume_from_checkpoint': None, 'hub_model_id': None, 'hub_strategy': 'every_save', 'hub_token': '<HUB_TOKEN>', 'hub_private_repo': False, 'hub_always_push': False, 'gradient_checkpointing': True, 'gradient_checkpointing_kwargs': None, 'include_inputs_for_metrics': False, 'fp16_backend': 'auto', 'push_to_hub_model_id': None, 'push_to_hub_organization': None, 'push_to_hub_token': '<PUSH_TO_HUB_TOKEN>', 'mp_parameters': '', 'auto_find_batch_size': False, 'full_determinism': False, 'torchdynamo': None, 'ray_scope': 'last', 'ddp_timeout': 1800, 'torch_compile': False, 'torch_compile_backend': None, 'torch_compile_mode': None, 'dispatch_batches': None, 'split_batches': None, 'include_tokens_per_second': False, 'include_num_input_tokens_seen': False, 'neftune_noise_alpha': None, 'optim_target_modules': None}
33
+ 2024-05-24 20:28:04,489 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
34
+ 2024-05-24 20:28:04,489 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
35
+ 2024-05-24 20:28:58,368 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
36
+ 2024-05-24 20:28:59,625 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
37
+ 2024-05-24 20:28:59,625 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
38
+ 2024-05-24 20:29:08,949 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
39
+ 2024-05-24 20:29:08,951 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
40
+ 2024-05-24 20:29:08,951 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
41
+ 2024-05-24 20:29:12,141 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
42
+ 2024-05-24 20:29:16,856 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
43
+ 2024-05-24 20:29:16,857 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
44
+ 2024-05-24 20:29:20,764 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
45
+ 2024-05-24 20:29:24,478 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
46
+ 2024-05-24 20:29:24,478 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
47
+ 2024-05-24 20:29:30,221 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
48
+ 2024-05-24 20:30:45,072 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
49
+ 2024-05-24 20:30:45,072 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
50
+ 2024-05-24 20:30:58,707 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
51
+ 2024-05-24 20:30:58,709 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
52
+ 2024-05-24 20:30:58,710 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
53
+ 2024-05-24 20:31:02,188 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
54
+ 2024-05-24 20:31:02,613 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
55
+ 2024-05-24 20:31:02,613 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
56
+ 2024-05-24 20:31:04,725 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
57
+ 2024-05-24 20:31:04,727 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
58
+ 2024-05-24 20:31:04,727 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
59
+ 2024-05-24 20:31:07,065 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
60
+ 2024-05-24 20:31:07,298 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
61
+ 2024-05-24 20:31:07,298 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
62
+ 2024-05-24 20:31:10,187 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
63
+ 2024-05-24 20:31:10,193 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
64
+ 2024-05-24 20:31:10,193 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
65
+ 2024-05-24 20:31:12,404 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
66
+ 2024-05-24 20:31:50,143 INFO MainThread:34 [wandb_run.py:_config_callback():1347] config_cb None None {'return_dict': True, 'output_hidden_states': False, 'output_attentions': False, 'torchscript': False, 'torch_dtype': None, 'use_bfloat16': False, 'tf_legacy_loss': False, 'pruned_heads': {}, 'tie_word_embeddings': True, 'chunk_size_feed_forward': 0, 'is_encoder_decoder': False, 'is_decoder': False, 'cross_attention_hidden_size': None, 'add_cross_attention': False, 'tie_encoder_decoder': False, 'max_length': 20, 'min_length': 0, 'do_sample': False, 'early_stopping': False, 'num_beams': 1, 'num_beam_groups': 1, 'diversity_penalty': 0.0, 'temperature': 1.0, 'top_k': 50, 'top_p': 1.0, 'typical_p': 1.0, 'repetition_penalty': 1.0, 'length_penalty': 1.0, 'no_repeat_ngram_size': 0, 'encoder_no_repeat_ngram_size': 0, 'bad_words_ids': None, 'num_return_sequences': 1, 'output_scores': False, 'return_dict_in_generate': False, 'forced_bos_token_id': None, 'forced_eos_token_id': None, 'remove_invalid_values': False, 'exponential_decay_length_penalty': None, 'suppress_tokens': None, 'begin_suppress_tokens': None, 'architectures': ['Wav2Vec2ForPreTraining'], 'finetuning_task': None, 'id2label': {0: 'LABEL_0', 1: 'LABEL_1'}, 'label2id': {'LABEL_0': 0, 'LABEL_1': 1}, 'tokenizer_class': None, 'prefix': None, 'bos_token_id': 1, 'pad_token_id': 0, 'eos_token_id': 2, 'sep_token_id': None, 'decoder_start_token_id': None, 'task_specific_params': None, 'problem_type': None, '_name_or_path': 'facebook/wav2vec2-base', 'transformers_version': '4.39.3', 'freeze_feat_extract_train': True, 'mask_channel_length': 10, 'mask_channel_min_space': 1, 'mask_channel_other': 0.0, 'mask_channel_prob': 0.0, 'mask_channel_selection': 'static', 'mask_time_min_space': 1, 'mask_time_other': 0.0, 'mask_time_selection': 'static', 'model_type': 'wav2vec2', 'no_mask_channel_overlap': False, 'no_mask_time_overlap': False, 'num_feat_extract_layers': 7, 'hidden_size': 768, 'feat_extract_norm': 'group', 
'feat_extract_activation': 'gelu', 'conv_dim': [512, 512, 512, 512, 512, 512, 512], 'conv_stride': [5, 2, 2, 2, 2, 2, 2], 'conv_kernel': [10, 3, 3, 3, 3, 2, 2], 'conv_bias': False, 'num_conv_pos_embeddings': 128, 'num_conv_pos_embedding_groups': 16, 'num_hidden_layers': 12, 'intermediate_size': 3072, 'hidden_act': 'gelu', 'num_attention_heads': 12, 'hidden_dropout': 0.1, 'attention_dropout': 0.1, 'activation_dropout': 0.0, 'feat_proj_dropout': 0.1, 'final_dropout': 0.0, 'layerdrop': 0.0, 'layer_norm_eps': 1e-05, 'initializer_range': 0.02, 'vocab_size': 32, 'do_stable_layer_norm': False, 'use_weighted_layer_sum': False, 'apply_spec_augment': True, 'mask_time_prob': 0.05, 'mask_time_length': 10, 'mask_time_min_masks': 2, 'mask_feature_prob': 0.0, 'mask_feature_length': 10, 'mask_feature_min_masks': 0, 'num_codevectors_per_group': 320, 'num_codevector_groups': 2, 'contrastive_logits_temperature': 0.1, 'feat_quantizer_dropout': 0.0, 'num_negatives': 100, 'codevector_dim': 256, 'proj_codevector_dim': 256, 'diversity_loss_weight': 0.1, 'ctc_loss_reduction': 'sum', 'ctc_zero_infinity': False, 'add_adapter': False, 'adapter_kernel_size': 3, 'adapter_stride': 2, 'num_adapter_layers': 3, 'output_hidden_size': 768, 'adapter_attn_dim': None, 'classifier_proj_size': 256, 'tdnn_dim': [512, 512, 512, 512, 1500], 'tdnn_kernel': [5, 3, 3, 1, 1], 'tdnn_dilation': [1, 2, 3, 1, 1], 'xvector_output_dim': 512, 'output_dir': '/kaggle/working/', 'overwrite_output_dir': False, 'do_train': False, 'do_eval': True, 'do_predict': False, 'evaluation_strategy': 'steps', 'prediction_loss_only': False, 'per_device_train_batch_size': 32, 'per_device_eval_batch_size': 8, 'per_gpu_train_batch_size': None, 'per_gpu_eval_batch_size': None, 'gradient_accumulation_steps': 1, 'eval_accumulation_steps': None, 'eval_delay': 0, 'learning_rate': 0.0001, 'weight_decay': 0.005, 'adam_beta1': 0.9, 'adam_beta2': 0.999, 'adam_epsilon': 1e-08, 'max_grad_norm': 1.0, 'num_train_epochs': 30, 'max_steps': -1, 
'lr_scheduler_type': 'linear', 'lr_scheduler_kwargs': {}, 'warmup_ratio': 0.0, 'warmup_steps': 1000, 'log_level': 'passive', 'log_level_replica': 'warning', 'log_on_each_node': True, 'logging_dir': '/kaggle/working/runs/May24_20-31-07_890e8b3ca76b', 'logging_strategy': 'steps', 'logging_first_step': False, 'logging_steps': 500, 'logging_nan_inf_filter': True, 'save_strategy': 'steps', 'save_steps': 500, 'save_total_limit': 2, 'save_safetensors': True, 'save_on_each_node': False, 'save_only_model': False, 'no_cuda': False, 'use_cpu': False, 'use_mps_device': False, 'seed': 42, 'data_seed': None, 'jit_mode_eval': False, 'use_ipex': False, 'bf16': False, 'fp16': True, 'fp16_opt_level': 'O1', 'half_precision_backend': 'auto', 'bf16_full_eval': False, 'fp16_full_eval': False, 'tf32': None, 'local_rank': 0, 'ddp_backend': None, 'tpu_num_cores': None, 'tpu_metrics_debug': False, 'debug': [], 'dataloader_drop_last': False, 'eval_steps': 500, 'dataloader_num_workers': 0, 'dataloader_prefetch_factor': None, 'past_index': -1, 'run_name': '/kaggle/working/', 'disable_tqdm': False, 'remove_unused_columns': True, 'label_names': None, 'load_best_model_at_end': False, 'metric_for_best_model': None, 'greater_is_better': None, 'ignore_data_skip': False, 'fsdp': [], 'fsdp_min_num_params': 0, 'fsdp_config': {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, 'fsdp_transformer_layer_cls_to_wrap': None, 'accelerator_config': {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True}, 'deepspeed': None, 'label_smoothing_factor': 0.0, 'optim': 'adamw_torch', 'optim_args': None, 'adafactor': False, 'group_by_length': True, 'length_column_name': 'length', 'report_to': ['tensorboard', 'wandb'], 'ddp_find_unused_parameters': None, 'ddp_bucket_cap_mb': None, 'ddp_broadcast_buffers': None, 'dataloader_pin_memory': True, 'dataloader_persistent_workers': False, 'skip_memory_metrics': True, 
'use_legacy_prediction_loop': False, 'push_to_hub': True, 'resume_from_checkpoint': None, 'hub_model_id': None, 'hub_strategy': 'every_save', 'hub_token': '<HUB_TOKEN>', 'hub_private_repo': False, 'hub_always_push': False, 'gradient_checkpointing': True, 'gradient_checkpointing_kwargs': None, 'include_inputs_for_metrics': False, 'fp16_backend': 'auto', 'push_to_hub_model_id': None, 'push_to_hub_organization': None, 'push_to_hub_token': '<PUSH_TO_HUB_TOKEN>', 'mp_parameters': '', 'auto_find_batch_size': False, 'full_determinism': False, 'torchdynamo': None, 'ray_scope': 'last', 'ddp_timeout': 1800, 'torch_compile': False, 'torch_compile_backend': None, 'torch_compile_mode': None, 'dispatch_batches': None, 'split_batches': None, 'include_tokens_per_second': False, 'include_num_input_tokens_seen': False, 'neftune_noise_alpha': None, 'optim_target_modules': None}
67
+ 2024-05-24 20:31:58,626 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
68
+ 2024-05-24 20:31:58,626 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
69
+ 2024-05-24 20:34:06,571 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
70
+ 2024-05-24 20:34:06,717 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
71
+ 2024-05-24 20:34:06,717 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
72
+ 2024-05-24 20:34:10,001 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
73
+ 2024-05-24 20:34:46,464 INFO MainThread:34 [wandb_run.py:_config_callback():1347] config_cb None None {'return_dict': True, 'output_hidden_states': False, 'output_attentions': False, 'torchscript': False, 'torch_dtype': None, 'use_bfloat16': False, 'tf_legacy_loss': False, 'pruned_heads': {}, 'tie_word_embeddings': True, 'chunk_size_feed_forward': 0, 'is_encoder_decoder': False, 'is_decoder': False, 'cross_attention_hidden_size': None, 'add_cross_attention': False, 'tie_encoder_decoder': False, 'max_length': 20, 'min_length': 0, 'do_sample': False, 'early_stopping': False, 'num_beams': 1, 'num_beam_groups': 1, 'diversity_penalty': 0.0, 'temperature': 1.0, 'top_k': 50, 'top_p': 1.0, 'typical_p': 1.0, 'repetition_penalty': 1.0, 'length_penalty': 1.0, 'no_repeat_ngram_size': 0, 'encoder_no_repeat_ngram_size': 0, 'bad_words_ids': None, 'num_return_sequences': 1, 'output_scores': False, 'return_dict_in_generate': False, 'forced_bos_token_id': None, 'forced_eos_token_id': None, 'remove_invalid_values': False, 'exponential_decay_length_penalty': None, 'suppress_tokens': None, 'begin_suppress_tokens': None, 'architectures': ['Wav2Vec2ForPreTraining'], 'finetuning_task': None, 'id2label': {0: 'LABEL_0', 1: 'LABEL_1'}, 'label2id': {'LABEL_0': 0, 'LABEL_1': 1}, 'tokenizer_class': None, 'prefix': None, 'bos_token_id': 1, 'pad_token_id': 0, 'eos_token_id': 2, 'sep_token_id': None, 'decoder_start_token_id': None, 'task_specific_params': None, 'problem_type': None, '_name_or_path': 'facebook/wav2vec2-base', 'transformers_version': '4.39.3', 'freeze_feat_extract_train': True, 'mask_channel_length': 10, 'mask_channel_min_space': 1, 'mask_channel_other': 0.0, 'mask_channel_prob': 0.0, 'mask_channel_selection': 'static', 'mask_time_min_space': 1, 'mask_time_other': 0.0, 'mask_time_selection': 'static', 'model_type': 'wav2vec2', 'no_mask_channel_overlap': False, 'no_mask_time_overlap': False, 'num_feat_extract_layers': 7, 'hidden_size': 768, 'feat_extract_norm': 'group', 
'feat_extract_activation': 'gelu', 'conv_dim': [512, 512, 512, 512, 512, 512, 512], 'conv_stride': [5, 2, 2, 2, 2, 2, 2], 'conv_kernel': [10, 3, 3, 3, 3, 2, 2], 'conv_bias': False, 'num_conv_pos_embeddings': 128, 'num_conv_pos_embedding_groups': 16, 'num_hidden_layers': 12, 'intermediate_size': 3072, 'hidden_act': 'gelu', 'num_attention_heads': 12, 'hidden_dropout': 0.1, 'attention_dropout': 0.1, 'activation_dropout': 0.0, 'feat_proj_dropout': 0.1, 'final_dropout': 0.0, 'layerdrop': 0.0, 'layer_norm_eps': 1e-05, 'initializer_range': 0.02, 'vocab_size': 32, 'do_stable_layer_norm': False, 'use_weighted_layer_sum': False, 'apply_spec_augment': True, 'mask_time_prob': 0.05, 'mask_time_length': 10, 'mask_time_min_masks': 2, 'mask_feature_prob': 0.0, 'mask_feature_length': 10, 'mask_feature_min_masks': 0, 'num_codevectors_per_group': 320, 'num_codevector_groups': 2, 'contrastive_logits_temperature': 0.1, 'feat_quantizer_dropout': 0.0, 'num_negatives': 100, 'codevector_dim': 256, 'proj_codevector_dim': 256, 'diversity_loss_weight': 0.1, 'ctc_loss_reduction': 'sum', 'ctc_zero_infinity': False, 'add_adapter': False, 'adapter_kernel_size': 3, 'adapter_stride': 2, 'num_adapter_layers': 3, 'output_hidden_size': 768, 'adapter_attn_dim': None, 'classifier_proj_size': 256, 'tdnn_dim': [512, 512, 512, 512, 1500], 'tdnn_kernel': [5, 3, 3, 1, 1], 'tdnn_dilation': [1, 2, 3, 1, 1], 'xvector_output_dim': 512, 'output_dir': '/kaggle/working/', 'overwrite_output_dir': False, 'do_train': False, 'do_eval': True, 'do_predict': False, 'evaluation_strategy': 'steps', 'prediction_loss_only': False, 'per_device_train_batch_size': 64, 'per_device_eval_batch_size': 8, 'per_gpu_train_batch_size': None, 'per_gpu_eval_batch_size': None, 'gradient_accumulation_steps': 1, 'eval_accumulation_steps': None, 'eval_delay': 0, 'learning_rate': 0.0001, 'weight_decay': 0.005, 'adam_beta1': 0.9, 'adam_beta2': 0.999, 'adam_epsilon': 1e-08, 'max_grad_norm': 1.0, 'num_train_epochs': 30, 'max_steps': -1, 
'lr_scheduler_type': 'linear', 'lr_scheduler_kwargs': {}, 'warmup_ratio': 0.0, 'warmup_steps': 1000, 'log_level': 'passive', 'log_level_replica': 'warning', 'log_on_each_node': True, 'logging_dir': '/kaggle/working/runs/May24_20-34-06_890e8b3ca76b', 'logging_strategy': 'steps', 'logging_first_step': False, 'logging_steps': 500, 'logging_nan_inf_filter': True, 'save_strategy': 'steps', 'save_steps': 500, 'save_total_limit': 2, 'save_safetensors': True, 'save_on_each_node': False, 'save_only_model': False, 'no_cuda': False, 'use_cpu': False, 'use_mps_device': False, 'seed': 42, 'data_seed': None, 'jit_mode_eval': False, 'use_ipex': False, 'bf16': False, 'fp16': True, 'fp16_opt_level': 'O1', 'half_precision_backend': 'auto', 'bf16_full_eval': False, 'fp16_full_eval': False, 'tf32': None, 'local_rank': 0, 'ddp_backend': None, 'tpu_num_cores': None, 'tpu_metrics_debug': False, 'debug': [], 'dataloader_drop_last': False, 'eval_steps': 500, 'dataloader_num_workers': 0, 'dataloader_prefetch_factor': None, 'past_index': -1, 'run_name': '/kaggle/working/', 'disable_tqdm': False, 'remove_unused_columns': True, 'label_names': None, 'load_best_model_at_end': False, 'metric_for_best_model': None, 'greater_is_better': None, 'ignore_data_skip': False, 'fsdp': [], 'fsdp_min_num_params': 0, 'fsdp_config': {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, 'fsdp_transformer_layer_cls_to_wrap': None, 'accelerator_config': {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True}, 'deepspeed': None, 'label_smoothing_factor': 0.0, 'optim': 'adamw_torch', 'optim_args': None, 'adafactor': False, 'group_by_length': True, 'length_column_name': 'length', 'report_to': ['tensorboard', 'wandb'], 'ddp_find_unused_parameters': None, 'ddp_bucket_cap_mb': None, 'ddp_broadcast_buffers': None, 'dataloader_pin_memory': True, 'dataloader_persistent_workers': False, 'skip_memory_metrics': True, 
'use_legacy_prediction_loop': False, 'push_to_hub': True, 'resume_from_checkpoint': None, 'hub_model_id': None, 'hub_strategy': 'every_save', 'hub_token': '<HUB_TOKEN>', 'hub_private_repo': False, 'hub_always_push': False, 'gradient_checkpointing': True, 'gradient_checkpointing_kwargs': None, 'include_inputs_for_metrics': False, 'fp16_backend': 'auto', 'push_to_hub_model_id': None, 'push_to_hub_organization': None, 'push_to_hub_token': '<PUSH_TO_HUB_TOKEN>', 'mp_parameters': '', 'auto_find_batch_size': False, 'full_determinism': False, 'torchdynamo': None, 'ray_scope': 'last', 'ddp_timeout': 1800, 'torch_compile': False, 'torch_compile_backend': None, 'torch_compile_mode': None, 'dispatch_batches': None, 'split_batches': None, 'include_tokens_per_second': False, 'include_num_input_tokens_seen': False, 'neftune_noise_alpha': None, 'optim_target_modules': None}
74
+ 2024-05-24 20:34:55,928 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
75
+ 2024-05-24 20:34:55,929 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
76
+ 2024-05-24 20:35:26,131 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
77
+ 2024-05-24 20:35:26,139 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
78
+ 2024-05-24 20:35:26,139 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
79
+ 2024-05-24 20:35:32,729 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
80
+ 2024-05-24 20:35:32,732 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
81
+ 2024-05-24 20:35:32,732 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
82
+ 2024-05-24 20:35:34,729 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
83
+ 2024-05-24 20:35:34,732 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
84
+ 2024-05-24 20:35:34,733 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
85
+ 2024-05-24 20:35:47,483 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
86
+ 2024-05-24 20:35:47,488 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
87
+ 2024-05-24 20:35:47,488 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
88
+ 2024-05-24 20:35:56,249 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
89
+ 2024-05-24 20:35:56,253 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
90
+ 2024-05-24 20:35:56,253 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
91
+ 2024-05-24 20:37:20,366 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
92
+ 2024-05-24 20:37:20,370 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
93
+ 2024-05-24 20:37:20,370 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
94
+ 2024-05-24 20:37:28,481 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
95
+ 2024-05-24 20:37:28,510 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
96
+ 2024-05-24 20:37:28,511 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
97
+ 2024-05-24 20:38:00,946 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
98
+ 2024-05-24 20:38:00,950 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
99
+ 2024-05-24 20:38:00,950 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
100
+ 2024-05-24 20:38:04,524 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
101
+ 2024-05-24 20:38:04,550 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
102
+ 2024-05-24 20:38:04,550 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
103
+ 2024-05-24 20:38:12,369 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
104
+ 2024-05-24 20:38:12,432 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
105
+ 2024-05-24 20:38:12,432 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
106
+ 2024-05-24 20:38:16,938 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
107
+ 2024-05-24 20:38:16,998 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
108
+ 2024-05-24 20:38:16,998 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
109
+ 2024-05-24 20:38:21,248 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
110
+ 2024-05-24 20:38:21,260 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
111
+ 2024-05-24 20:38:21,260 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
112
+ 2024-05-24 20:38:22,930 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
113
+ 2024-05-24 20:38:22,940 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
114
+ 2024-05-24 20:38:22,940 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
115
+ 2024-05-24 20:38:37,851 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
116
+ 2024-05-24 20:38:55,254 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
117
+ 2024-05-24 20:38:55,254 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
118
+ 2024-05-24 20:38:58,788 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
119
+ 2024-05-24 20:38:58,827 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
120
+ 2024-05-24 20:38:58,827 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
121
+ 2024-05-24 20:39:00,762 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
122
+ 2024-05-24 20:40:16,333 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
123
+ 2024-05-24 20:40:16,334 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
124
+ 2024-05-24 20:40:21,472 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
125
+ 2024-05-24 20:40:22,682 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
126
+ 2024-05-24 20:40:22,682 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
127
+ 2024-05-24 20:40:28,979 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
128
+ 2024-05-24 20:40:33,961 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
129
+ 2024-05-24 20:40:33,961 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
130
+ 2024-05-24 20:40:36,994 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
131
+ 2024-05-24 20:40:36,996 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
132
+ 2024-05-24 20:40:36,997 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
133
+ 2024-05-24 20:40:42,203 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
134
+ 2024-05-24 20:40:42,621 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
135
+ 2024-05-24 20:40:42,621 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
136
+ 2024-05-24 20:40:44,473 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
137
+ 2024-05-24 20:40:44,475 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
138
+ 2024-05-24 20:40:44,475 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
139
+ 2024-05-24 20:40:48,431 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
140
+ 2024-05-24 20:40:48,669 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
141
+ 2024-05-24 20:40:48,669 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
142
+ 2024-05-24 20:40:50,725 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
143
+ 2024-05-24 20:40:50,730 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
144
+ 2024-05-24 20:40:50,730 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
145
+ 2024-05-24 20:40:52,537 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
146
+ 2024-05-24 20:41:30,450 INFO MainThread:34 [wandb_run.py:_config_callback():1347] config_cb None None {'return_dict': True, 'output_hidden_states': False, 'output_attentions': False, 'torchscript': False, 'torch_dtype': None, 'use_bfloat16': False, 'tf_legacy_loss': False, 'pruned_heads': {}, 'tie_word_embeddings': True, 'chunk_size_feed_forward': 0, 'is_encoder_decoder': False, 'is_decoder': False, 'cross_attention_hidden_size': None, 'add_cross_attention': False, 'tie_encoder_decoder': False, 'max_length': 20, 'min_length': 0, 'do_sample': False, 'early_stopping': False, 'num_beams': 1, 'num_beam_groups': 1, 'diversity_penalty': 0.0, 'temperature': 1.0, 'top_k': 50, 'top_p': 1.0, 'typical_p': 1.0, 'repetition_penalty': 1.0, 'length_penalty': 1.0, 'no_repeat_ngram_size': 0, 'encoder_no_repeat_ngram_size': 0, 'bad_words_ids': None, 'num_return_sequences': 1, 'output_scores': False, 'return_dict_in_generate': False, 'forced_bos_token_id': None, 'forced_eos_token_id': None, 'remove_invalid_values': False, 'exponential_decay_length_penalty': None, 'suppress_tokens': None, 'begin_suppress_tokens': None, 'architectures': ['Wav2Vec2ForPreTraining'], 'finetuning_task': None, 'id2label': {0: 'LABEL_0', 1: 'LABEL_1'}, 'label2id': {'LABEL_0': 0, 'LABEL_1': 1}, 'tokenizer_class': None, 'prefix': None, 'bos_token_id': 1, 'pad_token_id': 0, 'eos_token_id': 2, 'sep_token_id': None, 'decoder_start_token_id': None, 'task_specific_params': None, 'problem_type': None, '_name_or_path': 'facebook/wav2vec2-base', 'transformers_version': '4.39.3', 'freeze_feat_extract_train': True, 'mask_channel_length': 10, 'mask_channel_min_space': 1, 'mask_channel_other': 0.0, 'mask_channel_prob': 0.0, 'mask_channel_selection': 'static', 'mask_time_min_space': 1, 'mask_time_other': 0.0, 'mask_time_selection': 'static', 'model_type': 'wav2vec2', 'no_mask_channel_overlap': False, 'no_mask_time_overlap': False, 'num_feat_extract_layers': 7, 'hidden_size': 768, 'feat_extract_norm': 'group', 
'feat_extract_activation': 'gelu', 'conv_dim': [512, 512, 512, 512, 512, 512, 512], 'conv_stride': [5, 2, 2, 2, 2, 2, 2], 'conv_kernel': [10, 3, 3, 3, 3, 2, 2], 'conv_bias': False, 'num_conv_pos_embeddings': 128, 'num_conv_pos_embedding_groups': 16, 'num_hidden_layers': 12, 'intermediate_size': 3072, 'hidden_act': 'gelu', 'num_attention_heads': 12, 'hidden_dropout': 0.1, 'attention_dropout': 0.1, 'activation_dropout': 0.0, 'feat_proj_dropout': 0.1, 'final_dropout': 0.0, 'layerdrop': 0.0, 'layer_norm_eps': 1e-05, 'initializer_range': 0.02, 'vocab_size': 32, 'do_stable_layer_norm': False, 'use_weighted_layer_sum': False, 'apply_spec_augment': True, 'mask_time_prob': 0.05, 'mask_time_length': 10, 'mask_time_min_masks': 2, 'mask_feature_prob': 0.0, 'mask_feature_length': 10, 'mask_feature_min_masks': 0, 'num_codevectors_per_group': 320, 'num_codevector_groups': 2, 'contrastive_logits_temperature': 0.1, 'feat_quantizer_dropout': 0.0, 'num_negatives': 100, 'codevector_dim': 256, 'proj_codevector_dim': 256, 'diversity_loss_weight': 0.1, 'ctc_loss_reduction': 'sum', 'ctc_zero_infinity': False, 'add_adapter': False, 'adapter_kernel_size': 3, 'adapter_stride': 2, 'num_adapter_layers': 3, 'output_hidden_size': 768, 'adapter_attn_dim': None, 'classifier_proj_size': 256, 'tdnn_dim': [512, 512, 512, 512, 1500], 'tdnn_kernel': [5, 3, 3, 1, 1], 'tdnn_dilation': [1, 2, 3, 1, 1], 'xvector_output_dim': 512, 'output_dir': '/kaggle/working/', 'overwrite_output_dir': False, 'do_train': False, 'do_eval': True, 'do_predict': False, 'evaluation_strategy': 'steps', 'prediction_loss_only': False, 'per_device_train_batch_size': 64, 'per_device_eval_batch_size': 8, 'per_gpu_train_batch_size': None, 'per_gpu_eval_batch_size': None, 'gradient_accumulation_steps': 1, 'eval_accumulation_steps': None, 'eval_delay': 0, 'learning_rate': 0.0001, 'weight_decay': 0.005, 'adam_beta1': 0.9, 'adam_beta2': 0.999, 'adam_epsilon': 1e-08, 'max_grad_norm': 1.0, 'num_train_epochs': 30, 'max_steps': -1, 
'lr_scheduler_type': 'linear', 'lr_scheduler_kwargs': {}, 'warmup_ratio': 0.0, 'warmup_steps': 1000, 'log_level': 'passive', 'log_level_replica': 'warning', 'log_on_each_node': True, 'logging_dir': '/kaggle/working/runs/May24_20-40-48_890e8b3ca76b', 'logging_strategy': 'steps', 'logging_first_step': False, 'logging_steps': 500, 'logging_nan_inf_filter': True, 'save_strategy': 'steps', 'save_steps': 500, 'save_total_limit': 2, 'save_safetensors': True, 'save_on_each_node': False, 'save_only_model': False, 'no_cuda': False, 'use_cpu': False, 'use_mps_device': False, 'seed': 42, 'data_seed': None, 'jit_mode_eval': False, 'use_ipex': False, 'bf16': False, 'fp16': True, 'fp16_opt_level': 'O1', 'half_precision_backend': 'auto', 'bf16_full_eval': False, 'fp16_full_eval': False, 'tf32': None, 'local_rank': 0, 'ddp_backend': None, 'tpu_num_cores': None, 'tpu_metrics_debug': False, 'debug': [], 'dataloader_drop_last': False, 'eval_steps': 500, 'dataloader_num_workers': 0, 'dataloader_prefetch_factor': None, 'past_index': -1, 'run_name': '/kaggle/working/', 'disable_tqdm': False, 'remove_unused_columns': True, 'label_names': None, 'load_best_model_at_end': False, 'metric_for_best_model': None, 'greater_is_better': None, 'ignore_data_skip': False, 'fsdp': [], 'fsdp_min_num_params': 0, 'fsdp_config': {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, 'fsdp_transformer_layer_cls_to_wrap': None, 'accelerator_config': {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True}, 'deepspeed': None, 'label_smoothing_factor': 0.0, 'optim': 'adamw_torch', 'optim_args': None, 'adafactor': False, 'group_by_length': True, 'length_column_name': 'length', 'report_to': ['tensorboard', 'wandb'], 'ddp_find_unused_parameters': None, 'ddp_bucket_cap_mb': None, 'ddp_broadcast_buffers': None, 'dataloader_pin_memory': True, 'dataloader_persistent_workers': False, 'skip_memory_metrics': True, 
'use_legacy_prediction_loop': False, 'push_to_hub': True, 'resume_from_checkpoint': None, 'hub_model_id': None, 'hub_strategy': 'every_save', 'hub_token': '<HUB_TOKEN>', 'hub_private_repo': False, 'hub_always_push': False, 'gradient_checkpointing': True, 'gradient_checkpointing_kwargs': None, 'include_inputs_for_metrics': False, 'fp16_backend': 'auto', 'push_to_hub_model_id': None, 'push_to_hub_organization': None, 'push_to_hub_token': '<PUSH_TO_HUB_TOKEN>', 'mp_parameters': '', 'auto_find_batch_size': False, 'full_determinism': False, 'torchdynamo': None, 'ray_scope': 'last', 'ddp_timeout': 1800, 'torch_compile': False, 'torch_compile_backend': None, 'torch_compile_mode': None, 'dispatch_batches': None, 'split_batches': None, 'include_tokens_per_second': False, 'include_num_input_tokens_seen': False, 'neftune_noise_alpha': None, 'optim_target_modules': None}
147
+ 2024-05-24 20:41:39,043 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
148
+ 2024-05-24 20:41:39,044 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
149
+ 2024-05-24 20:42:26,950 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
150
+ 2024-05-24 20:42:27,128 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
151
+ 2024-05-24 20:42:27,128 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
152
+ 2024-05-24 20:42:29,871 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
153
+ 2024-05-24 20:43:07,276 INFO MainThread:34 [wandb_run.py:_config_callback():1347] config_cb None None {'return_dict': True, 'output_hidden_states': False, 'output_attentions': False, 'torchscript': False, 'torch_dtype': None, 'use_bfloat16': False, 'tf_legacy_loss': False, 'pruned_heads': {}, 'tie_word_embeddings': True, 'chunk_size_feed_forward': 0, 'is_encoder_decoder': False, 'is_decoder': False, 'cross_attention_hidden_size': None, 'add_cross_attention': False, 'tie_encoder_decoder': False, 'max_length': 20, 'min_length': 0, 'do_sample': False, 'early_stopping': False, 'num_beams': 1, 'num_beam_groups': 1, 'diversity_penalty': 0.0, 'temperature': 1.0, 'top_k': 50, 'top_p': 1.0, 'typical_p': 1.0, 'repetition_penalty': 1.0, 'length_penalty': 1.0, 'no_repeat_ngram_size': 0, 'encoder_no_repeat_ngram_size': 0, 'bad_words_ids': None, 'num_return_sequences': 1, 'output_scores': False, 'return_dict_in_generate': False, 'forced_bos_token_id': None, 'forced_eos_token_id': None, 'remove_invalid_values': False, 'exponential_decay_length_penalty': None, 'suppress_tokens': None, 'begin_suppress_tokens': None, 'architectures': ['Wav2Vec2ForPreTraining'], 'finetuning_task': None, 'id2label': {0: 'LABEL_0', 1: 'LABEL_1'}, 'label2id': {'LABEL_0': 0, 'LABEL_1': 1}, 'tokenizer_class': None, 'prefix': None, 'bos_token_id': 1, 'pad_token_id': 0, 'eos_token_id': 2, 'sep_token_id': None, 'decoder_start_token_id': None, 'task_specific_params': None, 'problem_type': None, '_name_or_path': 'facebook/wav2vec2-base', 'transformers_version': '4.39.3', 'freeze_feat_extract_train': True, 'mask_channel_length': 10, 'mask_channel_min_space': 1, 'mask_channel_other': 0.0, 'mask_channel_prob': 0.0, 'mask_channel_selection': 'static', 'mask_time_min_space': 1, 'mask_time_other': 0.0, 'mask_time_selection': 'static', 'model_type': 'wav2vec2', 'no_mask_channel_overlap': False, 'no_mask_time_overlap': False, 'num_feat_extract_layers': 7, 'hidden_size': 768, 'feat_extract_norm': 'group', 
'feat_extract_activation': 'gelu', 'conv_dim': [512, 512, 512, 512, 512, 512, 512], 'conv_stride': [5, 2, 2, 2, 2, 2, 2], 'conv_kernel': [10, 3, 3, 3, 3, 2, 2], 'conv_bias': False, 'num_conv_pos_embeddings': 128, 'num_conv_pos_embedding_groups': 16, 'num_hidden_layers': 12, 'intermediate_size': 3072, 'hidden_act': 'gelu', 'num_attention_heads': 12, 'hidden_dropout': 0.1, 'attention_dropout': 0.1, 'activation_dropout': 0.0, 'feat_proj_dropout': 0.1, 'final_dropout': 0.0, 'layerdrop': 0.0, 'layer_norm_eps': 1e-05, 'initializer_range': 0.02, 'vocab_size': 32, 'do_stable_layer_norm': False, 'use_weighted_layer_sum': False, 'apply_spec_augment': True, 'mask_time_prob': 0.05, 'mask_time_length': 10, 'mask_time_min_masks': 2, 'mask_feature_prob': 0.0, 'mask_feature_length': 10, 'mask_feature_min_masks': 0, 'num_codevectors_per_group': 320, 'num_codevector_groups': 2, 'contrastive_logits_temperature': 0.1, 'feat_quantizer_dropout': 0.0, 'num_negatives': 100, 'codevector_dim': 256, 'proj_codevector_dim': 256, 'diversity_loss_weight': 0.1, 'ctc_loss_reduction': 'sum', 'ctc_zero_infinity': False, 'add_adapter': False, 'adapter_kernel_size': 3, 'adapter_stride': 2, 'num_adapter_layers': 3, 'output_hidden_size': 768, 'adapter_attn_dim': None, 'classifier_proj_size': 256, 'tdnn_dim': [512, 512, 512, 512, 1500], 'tdnn_kernel': [5, 3, 3, 1, 1], 'tdnn_dilation': [1, 2, 3, 1, 1], 'xvector_output_dim': 512, 'output_dir': '/kaggle/working/', 'overwrite_output_dir': False, 'do_train': False, 'do_eval': True, 'do_predict': False, 'evaluation_strategy': 'steps', 'prediction_loss_only': False, 'per_device_train_batch_size': 16, 'per_device_eval_batch_size': 8, 'per_gpu_train_batch_size': None, 'per_gpu_eval_batch_size': None, 'gradient_accumulation_steps': 1, 'eval_accumulation_steps': None, 'eval_delay': 0, 'learning_rate': 0.0001, 'weight_decay': 0.005, 'adam_beta1': 0.9, 'adam_beta2': 0.999, 'adam_epsilon': 1e-08, 'max_grad_norm': 1.0, 'num_train_epochs': 30, 'max_steps': -1, 
'lr_scheduler_type': 'linear', 'lr_scheduler_kwargs': {}, 'warmup_ratio': 0.0, 'warmup_steps': 1000, 'log_level': 'passive', 'log_level_replica': 'warning', 'log_on_each_node': True, 'logging_dir': '/kaggle/working/runs/May24_20-42-26_890e8b3ca76b', 'logging_strategy': 'steps', 'logging_first_step': False, 'logging_steps': 500, 'logging_nan_inf_filter': True, 'save_strategy': 'steps', 'save_steps': 500, 'save_total_limit': 2, 'save_safetensors': True, 'save_on_each_node': False, 'save_only_model': False, 'no_cuda': False, 'use_cpu': False, 'use_mps_device': False, 'seed': 42, 'data_seed': None, 'jit_mode_eval': False, 'use_ipex': False, 'bf16': False, 'fp16': True, 'fp16_opt_level': 'O1', 'half_precision_backend': 'auto', 'bf16_full_eval': False, 'fp16_full_eval': False, 'tf32': None, 'local_rank': 0, 'ddp_backend': None, 'tpu_num_cores': None, 'tpu_metrics_debug': False, 'debug': [], 'dataloader_drop_last': False, 'eval_steps': 500, 'dataloader_num_workers': 0, 'dataloader_prefetch_factor': None, 'past_index': -1, 'run_name': '/kaggle/working/', 'disable_tqdm': False, 'remove_unused_columns': True, 'label_names': None, 'load_best_model_at_end': False, 'metric_for_best_model': None, 'greater_is_better': None, 'ignore_data_skip': False, 'fsdp': [], 'fsdp_min_num_params': 0, 'fsdp_config': {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, 'fsdp_transformer_layer_cls_to_wrap': None, 'accelerator_config': {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True}, 'deepspeed': None, 'label_smoothing_factor': 0.0, 'optim': 'adamw_torch', 'optim_args': None, 'adafactor': False, 'group_by_length': True, 'length_column_name': 'length', 'report_to': ['tensorboard', 'wandb'], 'ddp_find_unused_parameters': None, 'ddp_bucket_cap_mb': None, 'ddp_broadcast_buffers': None, 'dataloader_pin_memory': True, 'dataloader_persistent_workers': False, 'skip_memory_metrics': True, 
'use_legacy_prediction_loop': False, 'push_to_hub': True, 'resume_from_checkpoint': None, 'hub_model_id': None, 'hub_strategy': 'every_save', 'hub_token': '<HUB_TOKEN>', 'hub_private_repo': False, 'hub_always_push': False, 'gradient_checkpointing': True, 'gradient_checkpointing_kwargs': None, 'include_inputs_for_metrics': False, 'fp16_backend': 'auto', 'push_to_hub_model_id': None, 'push_to_hub_organization': None, 'push_to_hub_token': '<PUSH_TO_HUB_TOKEN>', 'mp_parameters': '', 'auto_find_batch_size': False, 'full_determinism': False, 'torchdynamo': None, 'ray_scope': 'last', 'ddp_timeout': 1800, 'torch_compile': False, 'torch_compile_backend': None, 'torch_compile_mode': None, 'dispatch_batches': None, 'split_batches': None, 'include_tokens_per_second': False, 'include_num_input_tokens_seen': False, 'neftune_noise_alpha': None, 'optim_target_modules': None}
154
+ 2024-05-24 20:43:12,247 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
155
+ 2024-05-24 20:43:12,247 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
156
+ 2024-05-24 20:44:08,643 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
157
+ 2024-05-24 20:44:14,294 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
158
+ 2024-05-24 20:44:14,295 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
159
+ 2024-05-24 20:44:39,719 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
160
+ 2024-05-24 20:44:39,770 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
161
+ 2024-05-24 20:44:39,770 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
162
+ 2024-05-24 20:44:43,543 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
163
+ 2024-05-24 20:46:31,143 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
164
+ 2024-05-24 20:46:31,143 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
165
+ 2024-05-24 20:46:49,361 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
166
+ 2024-05-24 20:46:49,391 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
167
+ 2024-05-24 20:46:49,391 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
168
+ 2024-05-24 20:46:52,169 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
169
+ 2024-05-24 20:46:54,391 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
170
+ 2024-05-24 20:46:54,391 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
171
+ 2024-05-24 20:46:58,317 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
172
+ 2024-05-24 20:46:58,322 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
173
+ 2024-05-24 20:46:58,322 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
174
+ 2024-05-24 20:47:06,482 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
175
+ 2024-05-24 20:47:07,830 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
176
+ 2024-05-24 20:47:07,830 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
177
+ 2024-05-24 20:47:10,833 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
178
+ 2024-05-24 20:47:10,835 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
179
+ 2024-05-24 20:47:10,839 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
180
+ 2024-05-24 20:47:14,002 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
181
+ 2024-05-24 20:48:02,460 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
182
+ 2024-05-24 20:48:02,460 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
183
+ 2024-05-24 20:48:05,731 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
184
+ 2024-05-24 20:48:05,733 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
185
+ 2024-05-24 20:48:05,734 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
186
+ 2024-05-24 20:48:08,287 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
187
+ 2024-05-24 20:48:08,714 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
188
+ 2024-05-24 20:48:08,714 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
189
+ 2024-05-24 20:48:10,290 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
190
+ 2024-05-24 20:48:10,293 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
191
+ 2024-05-24 20:48:10,294 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
192
+ 2024-05-24 20:48:13,841 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
193
+ 2024-05-24 20:48:14,091 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
194
+ 2024-05-24 20:48:14,092 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
195
+ 2024-05-24 20:48:16,125 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
196
+ 2024-05-24 20:48:40,373 INFO MainThread:34 [wandb_run.py:_config_callback():1347] config_cb None None {'return_dict': True, 'output_hidden_states': False, 'output_attentions': False, 'torchscript': False, 'torch_dtype': None, 'use_bfloat16': False, 'tf_legacy_loss': False, 'pruned_heads': {}, 'tie_word_embeddings': True, 'chunk_size_feed_forward': 0, 'is_encoder_decoder': False, 'is_decoder': False, 'cross_attention_hidden_size': None, 'add_cross_attention': False, 'tie_encoder_decoder': False, 'max_length': 20, 'min_length': 0, 'do_sample': False, 'early_stopping': False, 'num_beams': 1, 'num_beam_groups': 1, 'diversity_penalty': 0.0, 'temperature': 1.0, 'top_k': 50, 'top_p': 1.0, 'typical_p': 1.0, 'repetition_penalty': 1.0, 'length_penalty': 1.0, 'no_repeat_ngram_size': 0, 'encoder_no_repeat_ngram_size': 0, 'bad_words_ids': None, 'num_return_sequences': 1, 'output_scores': False, 'return_dict_in_generate': False, 'forced_bos_token_id': None, 'forced_eos_token_id': None, 'remove_invalid_values': False, 'exponential_decay_length_penalty': None, 'suppress_tokens': None, 'begin_suppress_tokens': None, 'architectures': ['Wav2Vec2ForPreTraining'], 'finetuning_task': None, 'id2label': {0: 'LABEL_0', 1: 'LABEL_1'}, 'label2id': {'LABEL_0': 0, 'LABEL_1': 1}, 'tokenizer_class': None, 'prefix': None, 'bos_token_id': 1, 'pad_token_id': 0, 'eos_token_id': 2, 'sep_token_id': None, 'decoder_start_token_id': None, 'task_specific_params': None, 'problem_type': None, '_name_or_path': 'facebook/wav2vec2-base', 'transformers_version': '4.39.3', 'freeze_feat_extract_train': True, 'mask_channel_length': 10, 'mask_channel_min_space': 1, 'mask_channel_other': 0.0, 'mask_channel_prob': 0.0, 'mask_channel_selection': 'static', 'mask_time_min_space': 1, 'mask_time_other': 0.0, 'mask_time_selection': 'static', 'model_type': 'wav2vec2', 'no_mask_channel_overlap': False, 'no_mask_time_overlap': False, 'num_feat_extract_layers': 7, 'hidden_size': 768, 'feat_extract_norm': 'group', 
'feat_extract_activation': 'gelu', 'conv_dim': [512, 512, 512, 512, 512, 512, 512], 'conv_stride': [5, 2, 2, 2, 2, 2, 2], 'conv_kernel': [10, 3, 3, 3, 3, 2, 2], 'conv_bias': False, 'num_conv_pos_embeddings': 128, 'num_conv_pos_embedding_groups': 16, 'num_hidden_layers': 12, 'intermediate_size': 3072, 'hidden_act': 'gelu', 'num_attention_heads': 12, 'hidden_dropout': 0.1, 'attention_dropout': 0.1, 'activation_dropout': 0.0, 'feat_proj_dropout': 0.1, 'final_dropout': 0.0, 'layerdrop': 0.0, 'layer_norm_eps': 1e-05, 'initializer_range': 0.02, 'vocab_size': 32, 'do_stable_layer_norm': False, 'use_weighted_layer_sum': False, 'apply_spec_augment': True, 'mask_time_prob': 0.05, 'mask_time_length': 10, 'mask_time_min_masks': 2, 'mask_feature_prob': 0.0, 'mask_feature_length': 10, 'mask_feature_min_masks': 0, 'num_codevectors_per_group': 320, 'num_codevector_groups': 2, 'contrastive_logits_temperature': 0.1, 'feat_quantizer_dropout': 0.0, 'num_negatives': 100, 'codevector_dim': 256, 'proj_codevector_dim': 256, 'diversity_loss_weight': 0.1, 'ctc_loss_reduction': 'sum', 'ctc_zero_infinity': False, 'add_adapter': False, 'adapter_kernel_size': 3, 'adapter_stride': 2, 'num_adapter_layers': 3, 'output_hidden_size': 768, 'adapter_attn_dim': None, 'classifier_proj_size': 256, 'tdnn_dim': [512, 512, 512, 512, 1500], 'tdnn_kernel': [5, 3, 3, 1, 1], 'tdnn_dilation': [1, 2, 3, 1, 1], 'xvector_output_dim': 512, 'output_dir': '/kaggle/working/', 'overwrite_output_dir': False, 'do_train': False, 'do_eval': True, 'do_predict': False, 'evaluation_strategy': 'steps', 'prediction_loss_only': False, 'per_device_train_batch_size': 16, 'per_device_eval_batch_size': 8, 'per_gpu_train_batch_size': None, 'per_gpu_eval_batch_size': None, 'gradient_accumulation_steps': 1, 'eval_accumulation_steps': None, 'eval_delay': 0, 'learning_rate': 0.0001, 'weight_decay': 0.005, 'adam_beta1': 0.9, 'adam_beta2': 0.999, 'adam_epsilon': 1e-08, 'max_grad_norm': 1.0, 'num_train_epochs': 30, 'max_steps': -1, 
'lr_scheduler_type': 'linear', 'lr_scheduler_kwargs': {}, 'warmup_ratio': 0.0, 'warmup_steps': 1000, 'log_level': 'passive', 'log_level_replica': 'warning', 'log_on_each_node': True, 'logging_dir': '/kaggle/working/runs/May24_20-48-13_890e8b3ca76b', 'logging_strategy': 'steps', 'logging_first_step': False, 'logging_steps': 500, 'logging_nan_inf_filter': True, 'save_strategy': 'steps', 'save_steps': 500, 'save_total_limit': 2, 'save_safetensors': True, 'save_on_each_node': False, 'save_only_model': False, 'no_cuda': False, 'use_cpu': False, 'use_mps_device': False, 'seed': 42, 'data_seed': None, 'jit_mode_eval': False, 'use_ipex': False, 'bf16': False, 'fp16': True, 'fp16_opt_level': 'O1', 'half_precision_backend': 'auto', 'bf16_full_eval': False, 'fp16_full_eval': False, 'tf32': None, 'local_rank': 0, 'ddp_backend': None, 'tpu_num_cores': None, 'tpu_metrics_debug': False, 'debug': [], 'dataloader_drop_last': False, 'eval_steps': 500, 'dataloader_num_workers': 0, 'dataloader_prefetch_factor': None, 'past_index': -1, 'run_name': '/kaggle/working/', 'disable_tqdm': False, 'remove_unused_columns': True, 'label_names': None, 'load_best_model_at_end': False, 'metric_for_best_model': None, 'greater_is_better': None, 'ignore_data_skip': False, 'fsdp': [], 'fsdp_min_num_params': 0, 'fsdp_config': {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, 'fsdp_transformer_layer_cls_to_wrap': None, 'accelerator_config': {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True}, 'deepspeed': None, 'label_smoothing_factor': 0.0, 'optim': 'adamw_torch', 'optim_args': None, 'adafactor': False, 'group_by_length': True, 'length_column_name': 'length', 'report_to': ['tensorboard', 'wandb'], 'ddp_find_unused_parameters': None, 'ddp_bucket_cap_mb': None, 'ddp_broadcast_buffers': None, 'dataloader_pin_memory': True, 'dataloader_persistent_workers': False, 'skip_memory_metrics': True, 
'use_legacy_prediction_loop': False, 'push_to_hub': True, 'resume_from_checkpoint': None, 'hub_model_id': None, 'hub_strategy': 'every_save', 'hub_token': '<HUB_TOKEN>', 'hub_private_repo': False, 'hub_always_push': False, 'gradient_checkpointing': True, 'gradient_checkpointing_kwargs': None, 'include_inputs_for_metrics': False, 'fp16_backend': 'auto', 'push_to_hub_model_id': None, 'push_to_hub_organization': None, 'push_to_hub_token': '<PUSH_TO_HUB_TOKEN>', 'mp_parameters': '', 'auto_find_batch_size': False, 'full_determinism': False, 'torchdynamo': None, 'ray_scope': 'last', 'ddp_timeout': 1800, 'torch_compile': False, 'torch_compile_backend': None, 'torch_compile_mode': None, 'dispatch_batches': None, 'split_batches': None, 'include_tokens_per_second': False, 'include_num_input_tokens_seen': False, 'neftune_noise_alpha': None, 'optim_target_modules': None}
197
+ 2024-05-24 20:48:42,631 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
198
+ 2024-05-24 20:48:42,632 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
199
+ 2024-05-24 20:49:00,381 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
200
+ 2024-05-24 20:49:00,425 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
201
+ 2024-05-24 20:49:00,425 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
202
+ 2024-05-24 20:49:03,721 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
203
+ 2024-05-24 20:49:03,725 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
204
+ 2024-05-24 20:49:03,726 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
205
+ 2024-05-24 20:49:08,089 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
206
+ 2024-05-24 20:49:08,455 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
207
+ 2024-05-24 20:49:08,456 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
208
+ 2024-05-24 20:51:52,779 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
209
+ 2024-05-24 20:51:52,781 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
210
+ 2024-05-24 20:51:52,781 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
211
+ 2024-05-24 20:51:56,593 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
212
+ 2024-05-24 20:52:22,759 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
213
+ 2024-05-24 20:52:22,759 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
214
+ 2024-05-24 20:52:49,570 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
215
+ 2024-05-24 20:52:49,572 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
216
+ 2024-05-24 20:52:49,572 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
217
+ 2024-05-24 20:52:52,107 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
wandb/run-20240524_202737-n1w0kmmv/files/conda-environment.yaml ADDED
File without changes
wandb/run-20240524_202737-n1w0kmmv/files/config.yaml ADDED
@@ -0,0 +1,866 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ wandb_version: 1
2
+
3
+ _wandb:
4
+ desc: null
5
+ value:
6
+ python_version: 3.10.13
7
+ cli_version: 0.16.6
8
+ framework: huggingface
9
+ huggingface_version: 4.39.3
10
+ is_jupyter_run: true
11
+ is_kaggle_kernel: true
12
+ start_time: 1716582457.0
13
+ t:
14
+ 1:
15
+ - 1
16
+ - 2
17
+ - 3
18
+ - 5
19
+ - 11
20
+ - 12
21
+ - 49
22
+ - 51
23
+ - 53
24
+ - 55
25
+ - 71
26
+ - 105
27
+ 2:
28
+ - 1
29
+ - 2
30
+ - 3
31
+ - 5
32
+ - 11
33
+ - 12
34
+ - 49
35
+ - 51
36
+ - 53
37
+ - 55
38
+ - 71
39
+ - 105
40
+ 3:
41
+ - 7
42
+ - 23
43
+ 4: 3.10.13
44
+ 5: 0.16.6
45
+ 6: 4.39.3
46
+ 8:
47
+ - 1
48
+ - 2
49
+ - 5
50
+ 9:
51
+ 1: transformers_trainer
52
+ 13: linux-x86_64
53
+ m:
54
+ - 1: train/global_step
55
+ 6:
56
+ - 3
57
+ - 1: train/loss
58
+ 5: 1
59
+ 6:
60
+ - 1
61
+ - 1: train/grad_norm
62
+ 5: 1
63
+ 6:
64
+ - 1
65
+ - 1: train/learning_rate
66
+ 5: 1
67
+ 6:
68
+ - 1
69
+ - 1: train/epoch
70
+ 5: 1
71
+ 6:
72
+ - 1
73
+ - 1: eval/loss
74
+ 5: 1
75
+ 6:
76
+ - 1
77
+ - 1: eval/wer
78
+ 5: 1
79
+ 6:
80
+ - 1
81
+ - 1: eval/runtime
82
+ 5: 1
83
+ 6:
84
+ - 1
85
+ - 1: eval/samples_per_second
86
+ 5: 1
87
+ 6:
88
+ - 1
89
+ - 1: eval/steps_per_second
90
+ 5: 1
91
+ 6:
92
+ - 1
93
+ return_dict:
94
+ desc: null
95
+ value: true
96
+ output_hidden_states:
97
+ desc: null
98
+ value: false
99
+ output_attentions:
100
+ desc: null
101
+ value: false
102
+ torchscript:
103
+ desc: null
104
+ value: false
105
+ torch_dtype:
106
+ desc: null
107
+ value: null
108
+ use_bfloat16:
109
+ desc: null
110
+ value: false
111
+ tf_legacy_loss:
112
+ desc: null
113
+ value: false
114
+ pruned_heads:
115
+ desc: null
116
+ value: {}
117
+ tie_word_embeddings:
118
+ desc: null
119
+ value: true
120
+ chunk_size_feed_forward:
121
+ desc: null
122
+ value: 0
123
+ is_encoder_decoder:
124
+ desc: null
125
+ value: false
126
+ is_decoder:
127
+ desc: null
128
+ value: false
129
+ cross_attention_hidden_size:
130
+ desc: null
131
+ value: null
132
+ add_cross_attention:
133
+ desc: null
134
+ value: false
135
+ tie_encoder_decoder:
136
+ desc: null
137
+ value: false
138
+ max_length:
139
+ desc: null
140
+ value: 20
141
+ min_length:
142
+ desc: null
143
+ value: 0
144
+ do_sample:
145
+ desc: null
146
+ value: false
147
+ early_stopping:
148
+ desc: null
149
+ value: false
150
+ num_beams:
151
+ desc: null
152
+ value: 1
153
+ num_beam_groups:
154
+ desc: null
155
+ value: 1
156
+ diversity_penalty:
157
+ desc: null
158
+ value: 0.0
159
+ temperature:
160
+ desc: null
161
+ value: 1.0
162
+ top_k:
163
+ desc: null
164
+ value: 50
165
+ top_p:
166
+ desc: null
167
+ value: 1.0
168
+ typical_p:
169
+ desc: null
170
+ value: 1.0
171
+ repetition_penalty:
172
+ desc: null
173
+ value: 1.0
174
+ length_penalty:
175
+ desc: null
176
+ value: 1.0
177
+ no_repeat_ngram_size:
178
+ desc: null
179
+ value: 0
180
+ encoder_no_repeat_ngram_size:
181
+ desc: null
182
+ value: 0
183
+ bad_words_ids:
184
+ desc: null
185
+ value: null
186
+ num_return_sequences:
187
+ desc: null
188
+ value: 1
189
+ output_scores:
190
+ desc: null
191
+ value: false
192
+ return_dict_in_generate:
193
+ desc: null
194
+ value: false
195
+ forced_bos_token_id:
196
+ desc: null
197
+ value: null
198
+ forced_eos_token_id:
199
+ desc: null
200
+ value: null
201
+ remove_invalid_values:
202
+ desc: null
203
+ value: false
204
+ exponential_decay_length_penalty:
205
+ desc: null
206
+ value: null
207
+ suppress_tokens:
208
+ desc: null
209
+ value: null
210
+ begin_suppress_tokens:
211
+ desc: null
212
+ value: null
213
+ architectures:
214
+ desc: null
215
+ value:
216
+ - Wav2Vec2ForPreTraining
217
+ finetuning_task:
218
+ desc: null
219
+ value: null
220
+ id2label:
221
+ desc: null
222
+ value:
223
+ '0': LABEL_0
224
+ '1': LABEL_1
225
+ label2id:
226
+ desc: null
227
+ value:
228
+ LABEL_0: 0
229
+ LABEL_1: 1
230
+ tokenizer_class:
231
+ desc: null
232
+ value: null
233
+ prefix:
234
+ desc: null
235
+ value: null
236
+ bos_token_id:
237
+ desc: null
238
+ value: 1
239
+ pad_token_id:
240
+ desc: null
241
+ value: 0
242
+ eos_token_id:
243
+ desc: null
244
+ value: 2
245
+ sep_token_id:
246
+ desc: null
247
+ value: null
248
+ decoder_start_token_id:
249
+ desc: null
250
+ value: null
251
+ task_specific_params:
252
+ desc: null
253
+ value: null
254
+ problem_type:
255
+ desc: null
256
+ value: null
257
+ _name_or_path:
258
+ desc: null
259
+ value: facebook/wav2vec2-base
260
+ transformers_version:
261
+ desc: null
262
+ value: 4.39.3
263
+ freeze_feat_extract_train:
264
+ desc: null
265
+ value: true
266
+ mask_channel_length:
267
+ desc: null
268
+ value: 10
269
+ mask_channel_min_space:
270
+ desc: null
271
+ value: 1
272
+ mask_channel_other:
273
+ desc: null
274
+ value: 0.0
275
+ mask_channel_prob:
276
+ desc: null
277
+ value: 0.0
278
+ mask_channel_selection:
279
+ desc: null
280
+ value: static
281
+ mask_time_min_space:
282
+ desc: null
283
+ value: 1
284
+ mask_time_other:
285
+ desc: null
286
+ value: 0.0
287
+ mask_time_selection:
288
+ desc: null
289
+ value: static
290
+ model_type:
291
+ desc: null
292
+ value: wav2vec2
293
+ no_mask_channel_overlap:
294
+ desc: null
295
+ value: false
296
+ no_mask_time_overlap:
297
+ desc: null
298
+ value: false
299
+ num_feat_extract_layers:
300
+ desc: null
301
+ value: 7
302
+ hidden_size:
303
+ desc: null
304
+ value: 768
305
+ feat_extract_norm:
306
+ desc: null
307
+ value: group
308
+ feat_extract_activation:
309
+ desc: null
310
+ value: gelu
311
+ conv_dim:
312
+ desc: null
313
+ value:
314
+ - 512
315
+ - 512
316
+ - 512
317
+ - 512
318
+ - 512
319
+ - 512
320
+ - 512
321
+ conv_stride:
322
+ desc: null
323
+ value:
324
+ - 5
325
+ - 2
326
+ - 2
327
+ - 2
328
+ - 2
329
+ - 2
330
+ - 2
331
+ conv_kernel:
332
+ desc: null
333
+ value:
334
+ - 10
335
+ - 3
336
+ - 3
337
+ - 3
338
+ - 3
339
+ - 2
340
+ - 2
341
+ conv_bias:
342
+ desc: null
343
+ value: false
344
+ num_conv_pos_embeddings:
345
+ desc: null
346
+ value: 128
347
+ num_conv_pos_embedding_groups:
348
+ desc: null
349
+ value: 16
350
+ num_hidden_layers:
351
+ desc: null
352
+ value: 12
353
+ intermediate_size:
354
+ desc: null
355
+ value: 3072
356
+ hidden_act:
357
+ desc: null
358
+ value: gelu
359
+ num_attention_heads:
360
+ desc: null
361
+ value: 12
362
+ hidden_dropout:
363
+ desc: null
364
+ value: 0.1
365
+ attention_dropout:
366
+ desc: null
367
+ value: 0.1
368
+ activation_dropout:
369
+ desc: null
370
+ value: 0.0
371
+ feat_proj_dropout:
372
+ desc: null
373
+ value: 0.1
374
+ final_dropout:
375
+ desc: null
376
+ value: 0.0
377
+ layerdrop:
378
+ desc: null
379
+ value: 0.0
380
+ layer_norm_eps:
381
+ desc: null
382
+ value: 1.0e-05
383
+ initializer_range:
384
+ desc: null
385
+ value: 0.02
386
+ vocab_size:
387
+ desc: null
388
+ value: 32
389
+ do_stable_layer_norm:
390
+ desc: null
391
+ value: false
392
+ use_weighted_layer_sum:
393
+ desc: null
394
+ value: false
395
+ apply_spec_augment:
396
+ desc: null
397
+ value: true
398
+ mask_time_prob:
399
+ desc: null
400
+ value: 0.05
401
+ mask_time_length:
402
+ desc: null
403
+ value: 10
404
+ mask_time_min_masks:
405
+ desc: null
406
+ value: 2
407
+ mask_feature_prob:
408
+ desc: null
409
+ value: 0.0
410
+ mask_feature_length:
411
+ desc: null
412
+ value: 10
413
+ mask_feature_min_masks:
414
+ desc: null
415
+ value: 0
416
+ num_codevectors_per_group:
417
+ desc: null
418
+ value: 320
419
+ num_codevector_groups:
420
+ desc: null
421
+ value: 2
422
+ contrastive_logits_temperature:
423
+ desc: null
424
+ value: 0.1
425
+ feat_quantizer_dropout:
426
+ desc: null
427
+ value: 0.0
428
+ num_negatives:
429
+ desc: null
430
+ value: 100
431
+ codevector_dim:
432
+ desc: null
433
+ value: 256
434
+ proj_codevector_dim:
435
+ desc: null
436
+ value: 256
437
+ diversity_loss_weight:
438
+ desc: null
439
+ value: 0.1
440
+ ctc_loss_reduction:
441
+ desc: null
442
+ value: sum
443
+ ctc_zero_infinity:
444
+ desc: null
445
+ value: false
446
+ add_adapter:
447
+ desc: null
448
+ value: false
449
+ adapter_kernel_size:
450
+ desc: null
451
+ value: 3
452
+ adapter_stride:
453
+ desc: null
454
+ value: 2
455
+ num_adapter_layers:
456
+ desc: null
457
+ value: 3
458
+ output_hidden_size:
459
+ desc: null
460
+ value: 768
461
+ adapter_attn_dim:
462
+ desc: null
463
+ value: null
464
+ classifier_proj_size:
465
+ desc: null
466
+ value: 256
467
+ tdnn_dim:
468
+ desc: null
469
+ value:
470
+ - 512
471
+ - 512
472
+ - 512
473
+ - 512
474
+ - 1500
475
+ tdnn_kernel:
476
+ desc: null
477
+ value:
478
+ - 5
479
+ - 3
480
+ - 3
481
+ - 1
482
+ - 1
483
+ tdnn_dilation:
484
+ desc: null
485
+ value:
486
+ - 1
487
+ - 2
488
+ - 3
489
+ - 1
490
+ - 1
491
+ xvector_output_dim:
492
+ desc: null
493
+ value: 512
494
+ output_dir:
495
+ desc: null
496
+ value: /kaggle/working/
497
+ overwrite_output_dir:
498
+ desc: null
499
+ value: false
500
+ do_train:
501
+ desc: null
502
+ value: false
503
+ do_eval:
504
+ desc: null
505
+ value: true
506
+ do_predict:
507
+ desc: null
508
+ value: false
509
+ evaluation_strategy:
510
+ desc: null
511
+ value: steps
512
+ prediction_loss_only:
513
+ desc: null
514
+ value: false
515
+ per_device_train_batch_size:
516
+ desc: null
517
+ value: 16
518
+ per_device_eval_batch_size:
519
+ desc: null
520
+ value: 8
521
+ per_gpu_train_batch_size:
522
+ desc: null
523
+ value: null
524
+ per_gpu_eval_batch_size:
525
+ desc: null
526
+ value: null
527
+ gradient_accumulation_steps:
528
+ desc: null
529
+ value: 1
530
+ eval_accumulation_steps:
531
+ desc: null
532
+ value: null
533
+ eval_delay:
534
+ desc: null
535
+ value: 0
536
+ learning_rate:
537
+ desc: null
538
+ value: 0.0001
539
+ weight_decay:
540
+ desc: null
541
+ value: 0.005
542
+ adam_beta1:
543
+ desc: null
544
+ value: 0.9
545
+ adam_beta2:
546
+ desc: null
547
+ value: 0.999
548
+ adam_epsilon:
549
+ desc: null
550
+ value: 1.0e-08
551
+ max_grad_norm:
552
+ desc: null
553
+ value: 1.0
554
+ num_train_epochs:
555
+ desc: null
556
+ value: 30
557
+ max_steps:
558
+ desc: null
559
+ value: -1
560
+ lr_scheduler_type:
561
+ desc: null
562
+ value: linear
563
+ lr_scheduler_kwargs:
564
+ desc: null
565
+ value: {}
566
+ warmup_ratio:
567
+ desc: null
568
+ value: 0.0
569
+ warmup_steps:
570
+ desc: null
571
+ value: 1000
572
+ log_level:
573
+ desc: null
574
+ value: passive
575
+ log_level_replica:
576
+ desc: null
577
+ value: warning
578
+ log_on_each_node:
579
+ desc: null
580
+ value: true
581
+ logging_dir:
582
+ desc: null
583
+ value: /kaggle/working/runs/May24_20-48-13_890e8b3ca76b
584
+ logging_strategy:
585
+ desc: null
586
+ value: steps
587
+ logging_first_step:
588
+ desc: null
589
+ value: false
590
+ logging_steps:
591
+ desc: null
592
+ value: 500
593
+ logging_nan_inf_filter:
594
+ desc: null
595
+ value: true
596
+ save_strategy:
597
+ desc: null
598
+ value: steps
599
+ save_steps:
600
+ desc: null
601
+ value: 500
602
+ save_total_limit:
603
+ desc: null
604
+ value: 2
605
+ save_safetensors:
606
+ desc: null
607
+ value: true
608
+ save_on_each_node:
609
+ desc: null
610
+ value: false
611
+ save_only_model:
612
+ desc: null
613
+ value: false
614
+ no_cuda:
615
+ desc: null
616
+ value: false
617
+ use_cpu:
618
+ desc: null
619
+ value: false
620
+ use_mps_device:
621
+ desc: null
622
+ value: false
623
+ seed:
624
+ desc: null
625
+ value: 42
626
+ data_seed:
627
+ desc: null
628
+ value: null
629
+ jit_mode_eval:
630
+ desc: null
631
+ value: false
632
+ use_ipex:
633
+ desc: null
634
+ value: false
635
+ bf16:
636
+ desc: null
637
+ value: false
638
+ fp16:
639
+ desc: null
640
+ value: true
641
+ fp16_opt_level:
642
+ desc: null
643
+ value: O1
644
+ half_precision_backend:
645
+ desc: null
646
+ value: auto
647
+ bf16_full_eval:
648
+ desc: null
649
+ value: false
650
+ fp16_full_eval:
651
+ desc: null
652
+ value: false
653
+ tf32:
654
+ desc: null
655
+ value: null
656
+ local_rank:
657
+ desc: null
658
+ value: 0
659
+ ddp_backend:
660
+ desc: null
661
+ value: null
662
+ tpu_num_cores:
663
+ desc: null
664
+ value: null
665
+ tpu_metrics_debug:
666
+ desc: null
667
+ value: false
668
+ debug:
669
+ desc: null
670
+ value: []
671
+ dataloader_drop_last:
672
+ desc: null
673
+ value: false
674
+ eval_steps:
675
+ desc: null
676
+ value: 500
677
+ dataloader_num_workers:
678
+ desc: null
679
+ value: 0
680
+ dataloader_prefetch_factor:
681
+ desc: null
682
+ value: null
683
+ past_index:
684
+ desc: null
685
+ value: -1
686
+ run_name:
687
+ desc: null
688
+ value: /kaggle/working/
689
+ disable_tqdm:
690
+ desc: null
691
+ value: false
692
+ remove_unused_columns:
693
+ desc: null
694
+ value: true
695
+ label_names:
696
+ desc: null
697
+ value: null
698
+ load_best_model_at_end:
699
+ desc: null
700
+ value: false
701
+ metric_for_best_model:
702
+ desc: null
703
+ value: null
704
+ greater_is_better:
705
+ desc: null
706
+ value: null
707
+ ignore_data_skip:
708
+ desc: null
709
+ value: false
710
+ fsdp:
711
+ desc: null
712
+ value: []
713
+ fsdp_min_num_params:
714
+ desc: null
715
+ value: 0
716
+ fsdp_config:
717
+ desc: null
718
+ value:
719
+ min_num_params: 0
720
+ xla: false
721
+ xla_fsdp_v2: false
722
+ xla_fsdp_grad_ckpt: false
723
+ fsdp_transformer_layer_cls_to_wrap:
724
+ desc: null
725
+ value: null
726
+ accelerator_config:
727
+ desc: null
728
+ value:
729
+ split_batches: false
730
+ dispatch_batches: null
731
+ even_batches: true
732
+ use_seedable_sampler: true
733
+ deepspeed:
734
+ desc: null
735
+ value: null
736
+ label_smoothing_factor:
737
+ desc: null
738
+ value: 0.0
739
+ optim:
740
+ desc: null
741
+ value: adamw_torch
742
+ optim_args:
743
+ desc: null
744
+ value: null
745
+ adafactor:
746
+ desc: null
747
+ value: false
748
+ group_by_length:
749
+ desc: null
750
+ value: true
751
+ length_column_name:
752
+ desc: null
753
+ value: length
754
+ report_to:
755
+ desc: null
756
+ value:
757
+ - tensorboard
758
+ - wandb
759
+ ddp_find_unused_parameters:
760
+ desc: null
761
+ value: null
762
+ ddp_bucket_cap_mb:
763
+ desc: null
764
+ value: null
765
+ ddp_broadcast_buffers:
766
+ desc: null
767
+ value: null
768
+ dataloader_pin_memory:
769
+ desc: null
770
+ value: true
771
+ dataloader_persistent_workers:
772
+ desc: null
773
+ value: false
774
+ skip_memory_metrics:
775
+ desc: null
776
+ value: true
777
+ use_legacy_prediction_loop:
778
+ desc: null
779
+ value: false
780
+ push_to_hub:
781
+ desc: null
782
+ value: true
783
+ resume_from_checkpoint:
784
+ desc: null
785
+ value: null
786
+ hub_model_id:
787
+ desc: null
788
+ value: null
789
+ hub_strategy:
790
+ desc: null
791
+ value: every_save
792
+ hub_token:
793
+ desc: null
794
+ value: <HUB_TOKEN>
795
+ hub_private_repo:
796
+ desc: null
797
+ value: false
798
+ hub_always_push:
799
+ desc: null
800
+ value: false
801
+ gradient_checkpointing:
802
+ desc: null
803
+ value: true
804
+ gradient_checkpointing_kwargs:
805
+ desc: null
806
+ value: null
807
+ include_inputs_for_metrics:
808
+ desc: null
809
+ value: false
810
+ fp16_backend:
811
+ desc: null
812
+ value: auto
813
+ push_to_hub_model_id:
814
+ desc: null
815
+ value: null
816
+ push_to_hub_organization:
817
+ desc: null
818
+ value: null
819
+ push_to_hub_token:
820
+ desc: null
821
+ value: <PUSH_TO_HUB_TOKEN>
822
+ mp_parameters:
823
+ desc: null
824
+ value: ''
825
+ auto_find_batch_size:
826
+ desc: null
827
+ value: false
828
+ full_determinism:
829
+ desc: null
830
+ value: false
831
+ torchdynamo:
832
+ desc: null
833
+ value: null
834
+ ray_scope:
835
+ desc: null
836
+ value: last
837
+ ddp_timeout:
838
+ desc: null
839
+ value: 1800
840
+ torch_compile:
841
+ desc: null
842
+ value: false
843
+ torch_compile_backend:
844
+ desc: null
845
+ value: null
846
+ torch_compile_mode:
847
+ desc: null
848
+ value: null
849
+ dispatch_batches:
850
+ desc: null
851
+ value: null
852
+ split_batches:
853
+ desc: null
854
+ value: null
855
+ include_tokens_per_second:
856
+ desc: null
857
+ value: false
858
+ include_num_input_tokens_seen:
859
+ desc: null
860
+ value: false
861
+ neftune_noise_alpha:
862
+ desc: null
863
+ value: null
864
+ optim_target_modules:
865
+ desc: null
866
+ value: null
wandb/run-20240524_202737-n1w0kmmv/files/output.log ADDED
@@ -0,0 +1,171 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /opt/conda/lib/python3.10/site-packages/transformers/models/wav2vec2/processing_wav2vec2.py:156: UserWarning: `as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your labels by using the argument `text` of the regular `__call__` method (either in the same call as your audio inputs, or in a separate call.
2
+ warnings.warn(
3
+ /opt/conda/lib/python3.10/site-packages/torch/utils/checkpoint.py:429: UserWarning: torch.utils.checkpoint: please pass in use_reentrant=True or use_reentrant=False explicitly. The default value of use_reentrant will be updated to be False in the future. To maintain current behavior, pass use_reentrant=True. It is recommended that you use use_reentrant=False. Refer to docs for more details on the differences between the two variants.
4
+ warnings.warn(
5
+ Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
6
+ /opt/conda/lib/python3.10/site-packages/transformers/configuration_utils.py:363: UserWarning: Passing `gradient_checkpointing` to a config initialization is deprecated and will be removed in v5 Transformers. Using `model.gradient_checkpointing_enable()` instead, or if you are using the `Trainer` API, pass `gradient_checkpointing=True` in your `TrainingArguments`.
7
+ warnings.warn(
8
+ Some weights of Wav2Vec2ForCTC were not initialized from the model checkpoint at facebook/wav2vec2-base and are newly initialized: ['lm_head.bias', 'lm_head.weight', 'wav2vec2.encoder.pos_conv_embed.conv.parametrizations.weight.original0', 'wav2vec2.encoder.pos_conv_embed.conv.parametrizations.weight.original1']
9
+ You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
10
+ /opt/conda/lib/python3.10/site-packages/datasets/load.py:756: FutureWarning: The repository for wer contains custom code which must be executed to correctly load the metric. You can inspect the repository content at https://raw.githubusercontent.com/huggingface/datasets/2.18.0/metrics/wer/wer.py
11
+ You can avoid this message in future by passing the argument `trust_remote_code=True`.
12
+ Passing `trust_remote_code=True` will be mandatory to load this metric from the next major release of `datasets`.
13
+ warnings.warn(
14
+ /opt/conda/lib/python3.10/site-packages/accelerate/accelerator.py:436: FutureWarning: Passing the following arguments to `Accelerator` is deprecated and will be removed in version 1.0 of Accelerate: dict_keys(['dispatch_batches', 'split_batches', 'even_batches', 'use_seedable_sampler']). Please pass an `accelerate.DataLoaderConfiguration` instead:
15
+ dataloader_config = DataLoaderConfiguration(dispatch_batches=None, split_batches=False, even_batches=True, use_seedable_sampler=True)
16
+ warnings.warn(
17
+ wandb: WARNING Calling wandb.login() after wandb.init() has no effect.
18
+ /opt/conda/lib/python3.10/site-packages/transformers/models/wav2vec2/processing_wav2vec2.py:156: UserWarning: `as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your labels by using the argument `text` of the regular `__call__` method (either in the same call as your audio inputs, or in a separate call.
19
+ warnings.warn(
20
+ /opt/conda/lib/python3.10/site-packages/torch/utils/checkpoint.py:429: UserWarning: torch.utils.checkpoint: please pass in use_reentrant=True or use_reentrant=False explicitly. The default value of use_reentrant will be updated to be False in the future. To maintain current behavior, pass use_reentrant=True. It is recommended that you use use_reentrant=False. Refer to docs for more details on the differences between the two variants.
21
+ warnings.warn(
22
+ /opt/conda/lib/python3.10/site-packages/accelerate/accelerator.py:436: FutureWarning: Passing the following arguments to `Accelerator` is deprecated and will be removed in version 1.0 of Accelerate: dict_keys(['dispatch_batches', 'split_batches', 'even_batches', 'use_seedable_sampler']). Please pass an `accelerate.DataLoaderConfiguration` instead:
23
+ dataloader_config = DataLoaderConfiguration(dispatch_batches=None, split_batches=False, even_batches=True, use_seedable_sampler=True)
24
+ warnings.warn(
25
+ /opt/conda/lib/python3.10/site-packages/transformers/models/wav2vec2/processing_wav2vec2.py:156: UserWarning: `as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your labels by using the argument `text` of the regular `__call__` method (either in the same call as your audio inputs, or in a separate call.
26
+ warnings.warn(
27
+ /opt/conda/lib/python3.10/site-packages/torch/utils/checkpoint.py:429: UserWarning: torch.utils.checkpoint: please pass in use_reentrant=True or use_reentrant=False explicitly. The default value of use_reentrant will be updated to be False in the future. To maintain current behavior, pass use_reentrant=True. It is recommended that you use use_reentrant=False. Refer to docs for more details on the differences between the two variants.
28
+ warnings.warn(
29
+ 0 지금부터 그러면 본격적으로 우리가 도련동�� 대해서 조사를 할 거라 예? 이 마을이 ...
30
+ 1 예, 그건 한 칠백년 전에 이제 그 설촌이 시작이 되었다고 헙니다.
31
+ 2 예. 칠백년 전에 설촌이 뒈엇는데 이제 그루후에 이제 성씨들이 여러 성씨들이 많이 ...
32
+ 3 예. 그러면은 칠백년부터 허는데 설촌할 때 어떤 성씨들이 헷덴 말도 이신가마씨?
33
+ 4 그 다음 양씨. 고씨. 마 대략적으로 요런 순서가 뒘서양.
34
+ ...
35
+ 995 이제 뭘 부린다고 허까. 이제 사람이면 그 살짝살짝 빼여지곡 뭐허곡 헹근에 일을 아...
36
+ 996 그런 그 뭐가 잇는데. 에 짐 실런 오단에 그 구루마 찬, 참 그 마차 찬 차 그냥...
37
+ 997 것도 딴 거 보민 그 소가 그 좀 소머리도 그런 거 보민 영리허긴 영 리헌 거라. ...
38
+ 998 아무 거 그런 거 저런 거 안 보면은 묵묵히 일을 잘허는데, 그런 거 보면은 이제 ...
39
+ 999 이제 힘이 좀 약헌 사름안티 가면은 그냥 제멋대로 임잘 막 놀려먹 주.
40
+ Name: text, Length: 1000, dtype: object
41
+ 1000
42
+ file \
43
+ 0 /kaggle/input/prepared-dataset/jss-dataset/wav...
44
+ 1 /kaggle/input/prepared-dataset/jss-dataset/wav...
45
+ 2 /kaggle/input/prepared-dataset/jss-dataset/wav...
46
+ 3 /kaggle/input/prepared-dataset/jss-dataset/wav...
47
+ 4 /kaggle/input/prepared-dataset/jss-dataset/wav...
48
+ ... ...
49
+ 9995 /kaggle/input/prepared-dataset/jss-dataset/wav...
50
+ 9996 /kaggle/input/prepared-dataset/jss-dataset/wav...
51
+ 9997 /kaggle/input/prepared-dataset/jss-dataset/wav...
52
+ 9998 /kaggle/input/prepared-dataset/jss-dataset/wav...
53
+ 9999 /kaggle/input/prepared-dataset/jss-dataset/wav...
54
+ text
55
+ 0 지금부터 그러면 본격적으로 우리가 도련동에 대해서 조사를 할 거라 예? 이 마을이 ...
56
+ 1 예, 그건 한 칠백년 전에 이제 그 설촌이 시작이 되었다고 헙니다.
57
+ 2 예. 칠백년 전에 설촌이 뒈엇는데 이제 그루후에 이제 성씨들이 여러 성씨들이 많이 ...
58
+ 3 예. 그러면은 칠백년부터 허는데 설촌할 때 어떤 성씨들이 헷덴 말도 이신가마씨?
59
+ 4 그 다음 양씨. 고씨. 마 대략적으로 요런 순서가 뒘서양.
60
+ ... ...
61
+ 9995 메족메족 날 때 뜨셔가지고 서리 안 내리겟다 허당 자당 보민 뒷날 아첵이 서리 내리...
62
+ 9996 서리 내릴 땐 어떵헤야 뒈마씨?
63
+ 9997 게난 서리 내릴 때는 흑을 덮어줭은에. 흑을 덮어줘야 돼.
64
+ 9998 실시를 많이 허주게. 일찍 놧당은.
65
+ 9999 게난 요즘은 비니루가 이시니까. 비니루에도 고망 아이 뚜러지민 워낙 열 받으민 것도...
66
+ [10000 rows x 2 columns]
67
+ Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
68
+ /opt/conda/lib/python3.10/site-packages/transformers/configuration_utils.py:363: UserWarning: Passing `gradient_checkpointing` to a config initialization is deprecated and will be removed in v5 Transformers. Using `model.gradient_checkpointing_enable()` instead, or if you are using the `Trainer` API, pass `gradient_checkpointing=True` in your `TrainingArguments`.
69
+ warnings.warn(
70
+ Some weights of Wav2Vec2ForCTC were not initialized from the model checkpoint at facebook/wav2vec2-base and are newly initialized: ['lm_head.bias', 'lm_head.weight', 'wav2vec2.encoder.pos_conv_embed.conv.parametrizations.weight.original0', 'wav2vec2.encoder.pos_conv_embed.conv.parametrizations.weight.original1']
71
+ You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
72
+ /opt/conda/lib/python3.10/site-packages/datasets/load.py:756: FutureWarning: The repository for wer contains custom code which must be executed to correctly load the metric. You can inspect the repository content at https://raw.githubusercontent.com/huggingface/datasets/2.18.0/metrics/wer/wer.py
73
+ You can avoid this message in future by passing the argument `trust_remote_code=True`.
74
+ Passing `trust_remote_code=True` will be mandatory to load this metric from the next major release of `datasets`.
75
+ warnings.warn(
76
+ /opt/conda/lib/python3.10/site-packages/accelerate/accelerator.py:436: FutureWarning: Passing the following arguments to `Accelerator` is deprecated and will be removed in version 1.0 of Accelerate: dict_keys(['dispatch_batches', 'split_batches', 'even_batches', 'use_seedable_sampler']). Please pass an `accelerate.DataLoaderConfiguration` instead:
77
+ dataloader_config = DataLoaderConfiguration(dispatch_batches=None, split_batches=False, even_batches=True, use_seedable_sampler=True)
78
+ warnings.warn(
79
+ wandb: WARNING Calling wandb.login() after wandb.init() has no effect.
80
+ /opt/conda/lib/python3.10/site-packages/transformers/models/wav2vec2/processing_wav2vec2.py:156: UserWarning: `as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your labels by using the argument `text` of the regular `__call__` method (either in the same call as your audio inputs, or in a separate call.
81
+ warnings.warn(
82
+ /opt/conda/lib/python3.10/site-packages/torch/utils/checkpoint.py:429: UserWarning: torch.utils.checkpoint: please pass in use_reentrant=True or use_reentrant=False explicitly. The default value of use_reentrant will be updated to be False in the future. To maintain current behavior, pass use_reentrant=True. It is recommended that you use use_reentrant=False. Refer to docs for more details on the differences between the two variants.
83
+ warnings.warn(
84
+ /opt/conda/lib/python3.10/site-packages/accelerate/accelerator.py:436: FutureWarning: Passing the following arguments to `Accelerator` is deprecated and will be removed in version 1.0 of Accelerate: dict_keys(['dispatch_batches', 'split_batches', 'even_batches', 'use_seedable_sampler']). Please pass an `accelerate.DataLoaderConfiguration` instead:
85
+ dataloader_config = DataLoaderConfiguration(dispatch_batches=None, split_batches=False, even_batches=True, use_seedable_sampler=True)
86
+ warnings.warn(
87
+ /opt/conda/lib/python3.10/site-packages/transformers/models/wav2vec2/processing_wav2vec2.py:156: UserWarning: `as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your labels by using the argument `text` of the regular `__call__` method (either in the same call as your audio inputs, or in a separate call.
88
+ warnings.warn(
89
+ /opt/conda/lib/python3.10/site-packages/torch/utils/checkpoint.py:429: UserWarning: torch.utils.checkpoint: please pass in use_reentrant=True or use_reentrant=False explicitly. The default value of use_reentrant will be updated to be False in the future. To maintain current behavior, pass use_reentrant=True. It is recommended that you use use_reentrant=False. Refer to docs for more details on the differences between the two variants.
90
+ warnings.warn(
91
+ file \
92
+ 0 /kaggle/input/prepared-dataset/jss-dataset/wav...
93
+ 1 /kaggle/input/prepared-dataset/jss-dataset/wav...
94
+ 2 /kaggle/input/prepared-dataset/jss-dataset/wav...
95
+ 3 /kaggle/input/prepared-dataset/jss-dataset/wav...
96
+ 4 /kaggle/input/prepared-dataset/jss-dataset/wav...
97
+ ... ...
98
+ 9995 /kaggle/input/prepared-dataset/jss-dataset/wav...
99
+ 9996 /kaggle/input/prepared-dataset/jss-dataset/wav...
100
+ 9997 /kaggle/input/prepared-dataset/jss-dataset/wav...
101
+ 9998 /kaggle/input/prepared-dataset/jss-dataset/wav...
102
+ 9999 /kaggle/input/prepared-dataset/jss-dataset/wav...
103
+ text
104
+ 0 지금부터 그러면 본격적으로 우리가 도련동에 대해서 조사를 할 거라 예? 이 마을이 ...
105
+ 1 예, 그건 한 칠백년 전에 이제 그 설촌이 시작이 되었다고 헙니다.
106
+ 2 예. 칠백년 전에 설촌이 뒈엇는데 이제 그루후에 이제 성씨들이 여러 성씨들이 많이 ...
107
+ 3 예. 그러면은 칠백년부터 허는데 설촌할 때 어떤 성씨들이 헷덴 말도 이신가마씨?
108
+ 4 그 다음 양씨. 고씨. 마 대략적으로 요런 순서가 뒘서양.
109
+ ... ...
110
+ 9995 메족메족 날 때 뜨셔가지고 서리 안 내리겟다 허당 자당 보민 뒷날 아첵이 서리 내리...
111
+ 9996 서리 내릴 땐 어떵헤야 뒈마씨?
112
+ 9997 게난 서리 내릴 때는 흑을 덮어줭은에. 흑을 덮어줘야 돼.
113
+ 9998 실시를 많이 허주게. 일찍 놧당은.
114
+ 9999 게난 요즘은 비니루가 이시니까. 비니루에도 고망 아이 뚜러지민 워낙 열 받으민 것도...
115
+ [10000 rows x 2 columns]
116
+ file \
117
+ 0 /kaggle/input/prepared-dataset/jss-dataset/wav...
118
+ 1 /kaggle/input/prepared-dataset/jss-dataset/wav...
119
+ 2 /kaggle/input/prepared-dataset/jss-dataset/wav...
120
+ 3 /kaggle/input/prepared-dataset/jss-dataset/wav...
121
+ 4 /kaggle/input/prepared-dataset/jss-dataset/wav...
122
+ ... ...
123
+ 9995 /kaggle/input/prepared-dataset/jss-dataset/wav...
124
+ 9996 /kaggle/input/prepared-dataset/jss-dataset/wav...
125
+ 9997 /kaggle/input/prepared-dataset/jss-dataset/wav...
126
+ 9998 /kaggle/input/prepared-dataset/jss-dataset/wav...
127
+ 9999 /kaggle/input/prepared-dataset/jss-dataset/wav...
128
+ text \
129
+ 0 지금부터 그러면 본격적으로 우리가 도련동에 대해서 조사를 할 거라 예? 이 마을이 ...
130
+ 1 예, 그건 한 칠백년 전에 이제 그 설촌이 시작이 되었다고 헙니다.
131
+ 2 예. 칠백년 전에 설촌이 뒈엇는데 ���제 그루후에 이제 성씨들이 여러 성씨들이 많이 ...
132
+ 3 예. 그러면은 칠백년부터 허는데 설촌할 때 어떤 성씨들이 헷덴 말도 이신가마씨?
133
+ 4 그 다음 양씨. 고씨. 마 대략적으로 요런 순서가 뒘서양.
134
+ ... ...
135
+ 9995 메족메족 날 때 뜨셔가지고 서리 안 내리겟다 허당 자당 보민 뒷날 아첵이 서리 내리...
136
+ 9996 서리 내릴 땐 어떵헤야 뒈마씨?
137
+ 9997 게난 서리 내릴 때는 흑을 덮어줭은에. 흑을 덮어줘야 돼.
138
+ 9998 실시를 많이 허주게. 일찍 놧당은.
139
+ 9999 게난 요즘은 비니루가 이시니까. 비니루에도 고망 아이 뚜러지민 워낙 열 받으민 것도...
140
+ audio
141
+ 0 {'array': [8.564606e-05, -4.7375994e-05, 2.878...
142
+ 1 {'array': [-0.000804591, -0.00090140395, -0.00...
143
+ 2 {'array': [0.0007377937, 0.00092779216, 0.0005...
144
+ 3 {'array': [0.00077229546, 0.0008207406, 0.0001...
145
+ 4 {'array': [0.00037264611, 0.00073660095, 0.000...
146
+ ... ...
147
+ 9995 {'array': [-0.00028759346, -0.00042953866, -0....
148
+ 9996 {'array': [0.00026829867, 0.0003072208, -0.000...
149
+ 9997 {'array': [0.00016293344, 0.00022468295, -5.42...
150
+ 9998 {'array': [0.00092347193, 0.0012520454, 0.0012...
151
+ 9999 {'array': [0.00054650987, 0.0008459267, 0.0008...
152
+ [10000 rows x 3 columns]
153
+ Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
154
+ /opt/conda/lib/python3.10/site-packages/transformers/configuration_utils.py:363: UserWarning: Passing `gradient_checkpointing` to a config initialization is deprecated and will be removed in v5 Transformers. Using `model.gradient_checkpointing_enable()` instead, or if you are using the `Trainer` API, pass `gradient_checkpointing=True` in your `TrainingArguments`.
155
+ warnings.warn(
156
+ Some weights of Wav2Vec2ForCTC were not initialized from the model checkpoint at facebook/wav2vec2-base and are newly initialized: ['lm_head.bias', 'lm_head.weight', 'wav2vec2.encoder.pos_conv_embed.conv.parametrizations.weight.original0', 'wav2vec2.encoder.pos_conv_embed.conv.parametrizations.weight.original1']
157
+ You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
158
+ /opt/conda/lib/python3.10/site-packages/datasets/load.py:756: FutureWarning: The repository for wer contains custom code which must be executed to correctly load the metric. You can inspect the repository content at https://raw.githubusercontent.com/huggingface/datasets/2.18.0/metrics/wer/wer.py
159
+ You can avoid this message in future by passing the argument `trust_remote_code=True`.
160
+ Passing `trust_remote_code=True` will be mandatory to load this metric from the next major release of `datasets`.
161
+ warnings.warn(
162
+ /opt/conda/lib/python3.10/site-packages/accelerate/accelerator.py:436: FutureWarning: Passing the following arguments to `Accelerator` is deprecated and will be removed in version 1.0 of Accelerate: dict_keys(['dispatch_batches', 'split_batches', 'even_batches', 'use_seedable_sampler']). Please pass an `accelerate.DataLoaderConfiguration` instead:
163
+ dataloader_config = DataLoaderConfiguration(dispatch_batches=None, split_batches=False, even_batches=True, use_seedable_sampler=True)
164
+ warnings.warn(
165
+ /opt/conda/lib/python3.10/site-packages/transformers/models/wav2vec2/processing_wav2vec2.py:156: UserWarning: `as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your labels by using the argument `text` of the regular `__call__` method (either in the same call as your audio inputs, or in a separate call.
166
+ warnings.warn(
167
+ /opt/conda/lib/python3.10/site-packages/torch/utils/checkpoint.py:429: UserWarning: torch.utils.checkpoint: please pass in use_reentrant=True or use_reentrant=False explicitly. The default value of use_reentrant will be updated to be False in the future. To maintain current behavior, pass use_reentrant=True. It is recommended that you use use_reentrant=False. Refer to docs for more details on the differences between the two variants.
168
+ warnings.warn(
169
+ /opt/conda/lib/python3.10/site-packages/transformers/models/wav2vec2/processing_wav2vec2.py:156: UserWarning: `as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your labels by using the argument `text` of the regular `__call__` method (either in the same call as your audio inputs, or in a separate call.
170
+ warnings.warn(
171
+ /opt/conda/lib/python3.10/site-packages/torch/utils/checkpoint.py:429: UserWarning: torch.utils.checkpoint: please pass in use_reentrant=True or use_reentrant=False explicitly. The default value of use_reentrant will be updated to be False in the future. To maintain current behavior, pass use_reentrant=True. It is recommended that you use use_reentrant=False. Refer to docs for more details on the differences between the two variants.
wandb/run-20240524_202737-n1w0kmmv/files/requirements.txt ADDED
@@ -0,0 +1,862 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Babel==2.14.0
2
+ Boruta==0.3
3
+ Brotli==1.0.9
4
+ CVXcanon==0.1.2
5
+ Cartopy==0.23.0
6
+ Cython==3.0.8
7
+ Deprecated==1.2.14
8
+ Farama-Notifications==0.0.4
9
+ Flask==3.0.3
10
+ Geohash==1.0
11
+ GitPython==3.1.41
12
+ ImageHash==4.3.1
13
+ Janome==0.5.0
14
+ Jinja2==3.1.2
15
+ LunarCalendar==0.0.9
16
+ Mako==1.3.3
17
+ Markdown==3.5.2
18
+ MarkupSafe==2.1.3
19
+ MarkupSafe==2.1.5
20
+ Pillow==9.5.0
21
+ PuLP==2.8.0
22
+ PyArabic==0.6.15
23
+ PyJWT==2.8.0
24
+ PyMeeus==0.5.12
25
+ PySocks==1.7.1
26
+ PyUpSet==0.1.1.post7
27
+ PyWavelets==1.5.0
28
+ PyYAML==6.0.1
29
+ Pygments==2.17.2
30
+ Pympler==1.0.1
31
+ QtPy==2.4.1
32
+ Rtree==1.2.0
33
+ SQLAlchemy==2.0.25
34
+ SecretStorage==3.3.3
35
+ Send2Trash==1.8.2
36
+ Shapely==1.8.5.post1
37
+ Shimmy==1.3.0
38
+ SimpleITK==2.3.1
39
+ TPOT==0.12.1
40
+ Theano-PyMC==1.1.2
41
+ Theano==1.0.5
42
+ Wand==0.6.13
43
+ Werkzeug==3.0.2
44
+ absl-py==1.4.0
45
+ accelerate==0.29.3
46
+ access==1.1.9
47
+ affine==2.4.0
48
+ aiobotocore==2.12.3
49
+ aiofiles==22.1.0
50
+ aiohttp-cors==0.7.0
51
+ aiohttp==3.9.1
52
+ aioitertools==0.11.0
53
+ aiorwlock==1.3.0
54
+ aiosignal==1.3.1
55
+ aiosqlite==0.19.0
56
+ albumentations==1.4.0
57
+ alembic==1.13.1
58
+ altair==5.3.0
59
+ annotated-types==0.6.0
60
+ annoy==1.17.3
61
+ anyio==4.2.0
62
+ apache-beam==2.46.0
63
+ aplus==0.11.0
64
+ appdirs==1.4.4
65
+ archspec==0.2.3
66
+ argon2-cffi-bindings==21.2.0
67
+ argon2-cffi==23.1.0
68
+ array-record==0.5.0
69
+ arrow==1.3.0
70
+ arviz==0.18.0
71
+ astroid==3.1.0
72
+ astropy-iers-data==0.2024.4.15.2.45.49
73
+ astropy==6.0.1
74
+ asttokens==2.4.1
75
+ astunparse==1.6.3
76
+ async-lru==2.0.4
77
+ async-timeout==4.0.3
78
+ attrs==23.2.0
79
+ audioread==3.0.1
80
+ autopep8==2.0.4
81
+ backoff==2.2.1
82
+ bayesian-optimization==1.4.3
83
+ beatrix_jupyterlab==2023.128.151533
84
+ beautifulsoup4==4.12.2
85
+ blake3==0.2.1
86
+ bleach==6.1.0
87
+ blessed==1.20.0
88
+ blinker==1.7.0
89
+ blis==0.7.10
90
+ blosc2==2.6.2
91
+ bokeh==3.4.1
92
+ boltons==23.1.1
93
+ boto3==1.26.100
94
+ botocore==1.34.69
95
+ bq_helper==0.4.1
96
+ bqplot==0.12.43
97
+ branca==0.7.1
98
+ brewer2mpl==1.4.1
99
+ brotlipy==0.7.0
100
+ cached-property==1.5.2
101
+ cachetools==4.2.4
102
+ cachetools==5.3.2
103
+ catalogue==2.0.10
104
+ catalyst==22.4
105
+ catboost==1.2.3
106
+ category-encoders==2.6.3
107
+ certifi==2024.2.2
108
+ cesium==0.12.1
109
+ cffi==1.16.0
110
+ charset-normalizer==3.3.2
111
+ chex==0.1.86
112
+ cleverhans==4.0.0
113
+ click-plugins==1.1.1
114
+ click==8.1.7
115
+ cligj==0.7.2
116
+ cloud-tpu-client==0.10
117
+ cloud-tpu-profiler==2.4.0
118
+ cloudpathlib==0.16.0
119
+ cloudpickle==2.2.1
120
+ cloudpickle==3.0.0
121
+ cmdstanpy==1.2.2
122
+ colorama==0.4.6
123
+ colorcet==3.1.0
124
+ colorful==0.5.6
125
+ colorlog==6.8.2
126
+ colorlover==0.3.0
127
+ comm==0.2.1
128
+ conda-libmamba-solver==23.7.0
129
+ conda-package-handling==2.2.0
130
+ conda==23.7.4
131
+ conda_package_streaming==0.9.0
132
+ confection==0.1.4
133
+ contextily==1.6.0
134
+ contourpy==1.2.0
135
+ contourpy==1.2.1
136
+ convertdate==2.4.0
137
+ crcmod==1.7
138
+ cryptography==41.0.7
139
+ cuda-python==12.4.0
140
+ cudf==23.8.0
141
+ cufflinks==0.17.3
142
+ cuml==23.8.0
143
+ cupy==13.0.0
144
+ cycler==0.12.1
145
+ cymem==2.0.8
146
+ cytoolz==0.12.3
147
+ daal4py==2024.3.0
148
+ daal==2024.3.0
149
+ dacite==1.8.1
150
+ dask-cuda==23.8.0
151
+ dask-cudf==23.8.0
152
+ dask-expr==1.0.11
153
+ dask==2024.4.1
154
+ dataclasses-json==0.6.4
155
+ dataproc_jupyter_plugin==0.1.66
156
+ datasets==2.18.0
157
+ datashader==0.16.0
158
+ datatile==1.0.3
159
+ db-dtypes==1.2.0
160
+ deap==1.4.1
161
+ debugpy==1.8.0
162
+ decorator==5.1.1
163
+ deepdiff==7.0.1
164
+ defusedxml==0.7.1
165
+ deprecation==2.1.0
166
+ descartes==1.1.0
167
+ dill==0.3.8
168
+ dipy==1.9.0
169
+ distlib==0.3.8
170
+ distributed==2023.7.1
171
+ distro==1.9.0
172
+ dm-tree==0.1.8
173
+ docker-pycreds==0.4.0
174
+ docker==7.0.0
175
+ docopt==0.6.2
176
+ docstring-parser==0.15
177
+ docstring-to-markdown==0.15
178
+ docutils==0.21.1
179
+ earthengine-api==0.1.399
180
+ easydict==1.13
181
+ easyocr==1.7.1
182
+ ecos==2.0.13
183
+ eli5==0.13.0
184
+ emoji==2.11.0
185
+ en-core-web-lg==3.7.1
186
+ en-core-web-sm==3.7.1
187
+ entrypoints==0.4
188
+ ephem==4.1.5
189
+ esda==2.5.1
190
+ essentia==2.1b6.dev1110
191
+ et-xmlfile==1.1.0
192
+ etils==1.6.0
193
+ exceptiongroup==1.2.0
194
+ executing==2.0.1
195
+ explainable-ai-sdk==1.3.3
196
+ fastai==2.7.14
197
+ fastapi==0.108.0
198
+ fastavro==1.9.3
199
+ fastcore==1.5.29
200
+ fastdownload==0.0.7
201
+ fasteners==0.19
202
+ fastjsonschema==2.19.1
203
+ fastprogress==1.0.3
204
+ fastrlock==0.8.2
205
+ fasttext==0.9.2
206
+ feather-format==0.4.1
207
+ featuretools==1.30.0
208
+ filelock==3.13.1
209
+ fiona==1.9.6
210
+ fitter==1.7.0
211
+ flake8==7.0.0
212
+ flashtext==2.7
213
+ flatbuffers==23.5.26
214
+ flax==0.8.2
215
+ folium==0.16.0
216
+ fonttools==4.47.0
217
+ fonttools==4.51.0
218
+ fqdn==1.5.1
219
+ frozendict==2.4.2
220
+ frozenlist==1.4.1
221
+ fsspec==2024.2.0
222
+ fsspec==2024.3.1
223
+ funcy==2.0
224
+ fury==0.10.0
225
+ future==1.0.0
226
+ fuzzywuzzy==0.18.0
227
+ gast==0.5.4
228
+ gatspy==0.3
229
+ gcsfs==2024.2.0
230
+ gensim==4.3.2
231
+ geographiclib==2.0
232
+ geojson==3.1.0
233
+ geopandas==0.14.3
234
+ geoplot==0.5.1
235
+ geopy==2.4.1
236
+ geoviews==1.12.0
237
+ ggplot==0.11.5
238
+ giddy==2.3.5
239
+ gitdb==4.0.11
240
+ google-ai-generativelanguage==0.6.2
241
+ google-api-core==2.11.1
242
+ google-api-core==2.18.0
243
+ google-api-python-client==2.126.0
244
+ google-apitools==0.5.31
245
+ google-auth-httplib2==0.2.0
246
+ google-auth-oauthlib==1.2.0
247
+ google-auth==2.26.1
248
+ google-cloud-aiplatform==0.6.0a1
249
+ google-cloud-artifact-registry==1.10.0
250
+ google-cloud-automl==1.0.1
251
+ google-cloud-bigquery==2.34.4
252
+ google-cloud-bigtable==1.7.3
253
+ google-cloud-core==2.4.1
254
+ google-cloud-datastore==2.19.0
255
+ google-cloud-dlp==3.14.0
256
+ google-cloud-jupyter-config==0.0.5
257
+ google-cloud-language==2.13.3
258
+ google-cloud-monitoring==2.18.0
259
+ google-cloud-pubsub==2.19.0
260
+ google-cloud-pubsublite==1.9.0
261
+ google-cloud-recommendations-ai==0.7.1
262
+ google-cloud-resource-manager==1.11.0
263
+ google-cloud-spanner==3.40.1
264
+ google-cloud-storage==1.44.0
265
+ google-cloud-translate==3.12.1
266
+ google-cloud-videointelligence==2.13.3
267
+ google-cloud-vision==2.8.0
268
+ google-crc32c==1.5.0
269
+ google-generativeai==0.5.1
270
+ google-pasta==0.2.0
271
+ google-resumable-media==2.7.0
272
+ googleapis-common-protos==1.62.0
273
+ gplearn==0.4.2
274
+ gpustat==1.0.0
275
+ gpxpy==1.6.2
276
+ graphviz==0.20.3
277
+ greenlet==3.0.3
278
+ grpc-google-iam-v1==0.12.7
279
+ grpcio-status==1.48.1
280
+ grpcio-status==1.48.2
281
+ grpcio==1.51.1
282
+ grpcio==1.60.0
283
+ gviz-api==1.10.0
284
+ gym-notices==0.0.8
285
+ gym==0.26.2
286
+ gymnasium==0.29.0
287
+ h11==0.14.0
288
+ h2o==3.46.0.1
289
+ h5netcdf==1.3.0
290
+ h5py==3.10.0
291
+ haversine==2.8.1
292
+ hdfs==2.7.3
293
+ hep-ml==0.7.2
294
+ hijri-converter==2.3.1
295
+ hmmlearn==0.3.2
296
+ holidays==0.24
297
+ holoviews==1.18.3
298
+ hpsklearn==0.1.0
299
+ html5lib==1.1
300
+ htmlmin==0.1.12
301
+ httpcore==1.0.5
302
+ httplib2==0.21.0
303
+ httptools==0.6.1
304
+ httpx==0.27.0
305
+ huggingface-hub==0.22.2
306
+ hunspell==0.5.5
307
+ hydra-slayer==0.5.0
308
+ hyperopt==0.2.7
309
+ hypertools==0.8.0
310
+ idna==3.6
311
+ igraph==0.11.4
312
+ imagecodecs==2024.1.1
313
+ imageio==2.33.1
314
+ imbalanced-learn==0.12.2
315
+ imgaug==0.4.0
316
+ importlib-metadata==6.11.0
317
+ importlib-metadata==7.0.1
318
+ importlib-resources==6.1.1
319
+ inequality==1.0.1
320
+ iniconfig==2.0.0
321
+ ipydatawidgets==4.3.5
322
+ ipykernel==6.28.0
323
+ ipyleaflet==0.18.2
324
+ ipympl==0.7.0
325
+ ipython-genutils==0.2.0
326
+ ipython-genutils==0.2.0
327
+ ipython-sql==0.5.0
328
+ ipython==8.20.0
329
+ ipyvolume==0.6.3
330
+ ipyvue==1.11.0
331
+ ipyvuetify==1.9.4
332
+ ipywebrtc==0.6.0
333
+ ipywidgets==7.7.1
334
+ isoduration==20.11.0
335
+ isort==5.13.2
336
+ isoweek==1.3.3
337
+ itsdangerous==2.2.0
338
+ jaraco.classes==3.3.0
339
+ jax-jumpy==1.0.0
340
+ jax==0.4.23
341
+ jaxlib==0.4.23.dev20240116
342
+ jedi==0.19.1
343
+ jeepney==0.8.0
344
+ jieba==0.42.1
345
+ jiwer==3.0.4
346
+ jmespath==1.0.1
347
+ joblib==1.4.0
348
+ json5==0.9.14
349
+ jsonpatch==1.33
350
+ jsonpointer==2.4
351
+ jsonschema-specifications==2023.12.1
352
+ jsonschema==4.20.0
353
+ jupyter-console==6.6.3
354
+ jupyter-events==0.9.0
355
+ jupyter-http-over-ws==0.0.8
356
+ jupyter-lsp==1.5.1
357
+ jupyter-server-mathjax==0.2.6
358
+ jupyter-ydoc==0.2.5
359
+ jupyter_client==7.4.9
360
+ jupyter_client==8.6.0
361
+ jupyter_core==5.7.1
362
+ jupyter_server==2.12.5
363
+ jupyter_server_fileid==0.9.1
364
+ jupyter_server_proxy==4.1.0
365
+ jupyter_server_terminals==0.5.1
366
+ jupyter_server_ydoc==0.8.0
367
+ jupyterlab-lsp==5.1.0
368
+ jupyterlab-widgets==3.0.9
369
+ jupyterlab==4.1.6
370
+ jupyterlab_git==0.44.0
371
+ jupyterlab_pygments==0.3.0
372
+ jupyterlab_server==2.25.2
373
+ jupytext==1.16.0
374
+ kaggle-environments==1.14.3
375
+ kaggle==1.6.12
376
+ kagglehub==0.2.3
377
+ keras-cv==0.8.2
378
+ keras-nlp==0.9.3
379
+ keras-tuner==1.4.6
380
+ keras==3.2.1
381
+ kernels-mixer==0.0.7
382
+ keyring==24.3.0
383
+ keyrings.google-artifactregistry-auth==1.1.2
384
+ kfp-pipeline-spec==0.2.2
385
+ kfp-server-api==2.0.5
386
+ kfp==2.5.0
387
+ kiwisolver==1.4.5
388
+ kmapper==2.0.1
389
+ kmodes==0.12.2
390
+ korean-lunar-calendar==0.3.1
391
+ kornia==0.7.2
392
+ kornia_rs==0.1.3
393
+ kt-legacy==1.0.5
394
+ kubernetes==26.1.0
395
+ langcodes==3.3.0
396
+ langid==1.1.6
397
+ lazy_loader==0.3
398
+ learntools==0.3.4
399
+ leven==1.0.4
400
+ libclang==16.0.6
401
+ libmambapy==1.5.0
402
+ libpysal==4.9.2
403
+ librosa==0.10.1
404
+ lightgbm==4.2.0
405
+ lightning-utilities==0.11.2
406
+ lime==0.2.0.1
407
+ line-profiler==4.1.2
408
+ linkify-it-py==2.0.3
409
+ llvmlite==0.41.1
410
+ llvmlite==0.42.0
411
+ lml==0.1.0
412
+ locket==1.0.0
413
+ loguru==0.7.2
414
+ lxml==5.2.1
415
+ lz4==4.3.3
416
+ mamba==1.5.0
417
+ mapclassify==2.6.1
418
+ markdown-it-py==3.0.0
419
+ marshmallow==3.21.1
420
+ matplotlib-inline==0.1.6
421
+ matplotlib-venn==0.11.10
422
+ matplotlib==3.7.5
423
+ matplotlib==3.8.4
424
+ mccabe==0.7.0
425
+ mdit-py-plugins==0.4.0
426
+ mdurl==0.1.2
427
+ memory-profiler==0.61.0
428
+ menuinst==2.0.1
429
+ mercantile==1.2.1
430
+ mgwr==2.2.1
431
+ missingno==0.5.2
432
+ mistune==0.8.4
433
+ mizani==0.11.1
434
+ ml-dtypes==0.2.0
435
+ mlcrate==0.2.0
436
+ mlens==0.2.3
437
+ mlxtend==0.23.1
438
+ mne==1.6.1
439
+ mnist==0.2.2
440
+ momepy==0.7.0
441
+ more-itertools==10.2.0
442
+ mpld3==0.5.10
443
+ mpmath==1.3.0
444
+ msgpack==1.0.7
445
+ multidict==6.0.4
446
+ multimethod==1.10
447
+ multipledispatch==1.0.0
448
+ multiprocess==0.70.16
449
+ munkres==1.1.4
450
+ murmurhash==1.0.10
451
+ mypy-extensions==1.0.0
452
+ namex==0.0.8
453
+ nb-conda-kernels==2.3.1
454
+ nb_conda==2.2.1
455
+ nbclassic==1.0.0
456
+ nbclient==0.5.13
457
+ nbconvert==6.4.5
458
+ nbdime==3.2.0
459
+ nbformat==5.9.2
460
+ ndindex==1.8
461
+ nest-asyncio==1.5.8
462
+ networkx==3.2.1
463
+ nibabel==5.2.1
464
+ nilearn==0.10.4
465
+ ninja==1.11.1.1
466
+ nltk==3.2.4
467
+ nose==1.3.7
468
+ notebook==6.5.4
469
+ notebook==6.5.6
470
+ notebook_executor==0.2
471
+ notebook_shim==0.2.3
472
+ numba==0.58.1
473
+ numba==0.59.1
474
+ numexpr==2.10.0
475
+ numpy==1.26.4
476
+ nvidia-ml-py==11.495.46
477
+ nvtx==0.2.10
478
+ oauth2client==4.1.3
479
+ oauthlib==3.2.2
480
+ objsize==0.6.1
481
+ odfpy==1.4.1
482
+ olefile==0.47
483
+ onnx==1.16.0
484
+ opencensus-context==0.1.3
485
+ opencensus==0.11.4
486
+ opencv-contrib-python==4.9.0.80
487
+ opencv-python-headless==4.9.0.80
488
+ opencv-python==4.9.0.80
489
+ openpyxl==3.1.2
490
+ openslide-python==1.3.1
491
+ opentelemetry-api==1.22.0
492
+ opentelemetry-exporter-otlp-proto-common==1.22.0
493
+ opentelemetry-exporter-otlp-proto-grpc==1.22.0
494
+ opentelemetry-exporter-otlp-proto-http==1.22.0
495
+ opentelemetry-exporter-otlp==1.22.0
496
+ opentelemetry-proto==1.22.0
497
+ opentelemetry-sdk==1.22.0
498
+ opentelemetry-semantic-conventions==0.43b0
499
+ opt-einsum==3.3.0
500
+ optax==0.2.2
501
+ optree==0.11.0
502
+ optuna==3.6.1
503
+ orbax-checkpoint==0.5.9
504
+ ordered-set==4.1.0
505
+ orjson==3.9.10
506
+ ortools==9.4.1874
507
+ osmnx==1.9.2
508
+ overrides==7.4.0
509
+ packaging==21.3
510
+ pandas-datareader==0.10.0
511
+ pandas-profiling==3.6.6
512
+ pandas-summary==0.2.0
513
+ pandas==2.1.4
514
+ pandas==2.2.2
515
+ pandasql==0.7.3
516
+ pandocfilters==1.5.0
517
+ panel==1.4.1
518
+ papermill==2.5.0
519
+ param==2.1.0
520
+ parso==0.8.3
521
+ partd==1.4.1
522
+ path.py==12.5.0
523
+ path==16.14.0
524
+ pathos==0.3.2
525
+ pathy==0.10.3
526
+ patsy==0.5.6
527
+ pdf2image==1.17.0
528
+ pettingzoo==1.24.0
529
+ pexpect==4.8.0
530
+ pexpect==4.9.0
531
+ phik==0.12.4
532
+ pickleshare==0.7.5
533
+ pillow==10.3.0
534
+ pip==23.3.2
535
+ pkgutil_resolve_name==1.3.10
536
+ platformdirs==4.2.0
537
+ plotly-express==0.4.1
538
+ plotly==5.18.0
539
+ plotnine==0.13.4
540
+ pluggy==1.4.0
541
+ pointpats==2.4.0
542
+ polars==0.20.21
543
+ polyglot==16.7.4
544
+ pooch==1.8.1
545
+ pox==0.3.4
546
+ ppca==0.0.4
547
+ ppft==1.7.6.8
548
+ preprocessing==0.1.13
549
+ preshed==3.0.9
550
+ prettytable==3.9.0
551
+ progressbar2==4.4.2
552
+ prometheus-client==0.19.0
553
+ promise==2.3
554
+ prompt-toolkit==3.0.42
555
+ prompt-toolkit==3.0.43
556
+ prophet==1.1.1
557
+ proto-plus==1.23.0
558
+ protobuf==3.20.3
559
+ protobuf==4.21.12
560
+ psutil==5.9.3
561
+ psutil==5.9.7
562
+ ptyprocess==0.7.0
563
+ pudb==2024.1
564
+ pure-eval==0.2.2
565
+ py-cpuinfo==9.0.0
566
+ py-spy==0.3.14
567
+ py4j==0.10.9.7
568
+ pyLDAvis==3.4.1
569
+ pyOpenSSL==23.3.0
570
+ pyaml==23.12.0
571
+ pyarrow-hotfix==0.6
572
+ pyarrow==15.0.2
573
+ pyasn1-modules==0.3.0
574
+ pyasn1==0.5.1
575
+ pybind11==2.12.0
576
+ pyclipper==1.3.0.post5
577
+ pycodestyle==2.11.1
578
+ pycosat==0.6.6
579
+ pycparser==2.21
580
+ pycryptodome==3.20.0
581
+ pyct==0.5.0
582
+ pycuda==2024.1
583
+ pydantic==2.5.3
584
+ pydantic==2.7.0
585
+ pydantic_core==2.14.6
586
+ pydantic_core==2.18.1
587
+ pydegensac==0.1.2
588
+ pydicom==2.4.4
589
+ pydocstyle==6.3.0
590
+ pydot==1.4.2
591
+ pydub==0.25.1
592
+ pyemd==1.0.0
593
+ pyerfa==2.0.1.4
594
+ pyexcel-io==0.6.6
595
+ pyexcel-ods==0.6.0
596
+ pyflakes==3.2.0
597
+ pygltflib==1.16.2
598
+ pykalman==0.9.7
599
+ pylibraft==23.8.0
600
+ pylint==3.1.0
601
+ pymc3==3.11.4
602
+ pymongo==3.13.0
603
+ pynndescent==0.5.12
604
+ pynvml==11.4.1
605
+ pynvrtc==9.2
606
+ pyparsing==3.1.1
607
+ pyparsing==3.1.2
608
+ pypdf==4.2.0
609
+ pyproj==3.6.1
610
+ pysal==24.1
611
+ pyshp==2.3.1
612
+ pytesseract==0.3.10
613
+ pytest==8.1.1
614
+ python-bidi==0.4.2
615
+ python-dateutil==2.9.0.post0
616
+ python-dotenv==1.0.0
617
+ python-json-logger==2.0.7
618
+ python-louvain==0.16
619
+ python-lsp-jsonrpc==1.1.2
620
+ python-lsp-server==1.11.0
621
+ python-slugify==8.0.4
622
+ python-utils==3.8.2
623
+ pythreejs==2.4.2
624
+ pytoolconfig==1.3.1
625
+ pytools==2024.1.1
626
+ pytorch-ignite==0.5.0.post2
627
+ pytorch-lightning==2.2.2
628
+ pytz==2023.3.post1
629
+ pytz==2024.1
630
+ pyu2f==0.1.5
631
+ pyviz_comms==3.0.2
632
+ pyzmq==24.0.1
633
+ pyzmq==25.1.2
634
+ qgrid==1.3.1
635
+ qtconsole==5.5.1
636
+ quantecon==0.7.2
637
+ qudida==0.0.4
638
+ raft-dask==23.8.0
639
+ rapidfuzz==3.9.1
640
+ rasterio==1.3.10
641
+ rasterstats==0.19.0
642
+ ray-cpp==2.9.0
643
+ ray==2.9.0
644
+ referencing==0.32.1
645
+ regex==2023.12.25
646
+ requests-oauthlib==1.3.1
647
+ requests-toolbelt==0.10.1
648
+ requests==2.31.0
649
+ retrying==1.3.3
650
+ retrying==1.3.4
651
+ rfc3339-validator==0.1.4
652
+ rfc3986-validator==0.1.1
653
+ rgf-python==3.12.0
654
+ rich-click==1.7.4
655
+ rich==13.7.0
656
+ rich==13.7.1
657
+ rmm==23.8.0
658
+ rope==1.13.0
659
+ rpds-py==0.16.2
660
+ rsa==4.9
661
+ ruamel-yaml-conda==0.15.100
662
+ ruamel.yaml.clib==0.2.7
663
+ ruamel.yaml==0.17.40
664
+ s2sphere==0.2.5
665
+ s3fs==2024.2.0
666
+ s3transfer==0.6.2
667
+ safetensors==0.4.3
668
+ scattertext==0.1.19
669
+ scikit-image==0.22.0
670
+ scikit-learn-intelex==2024.3.0
671
+ scikit-learn==1.2.2
672
+ scikit-multilearn==0.2.0
673
+ scikit-optimize==0.10.1
674
+ scikit-plot==0.3.7
675
+ scikit-surprise==1.1.3
676
+ scipy==1.11.4
677
+ scipy==1.13.0
678
+ seaborn==0.12.2
679
+ segment_anything==1.0
680
+ segregation==2.5
681
+ semver==3.0.2
682
+ sentencepiece==0.2.0
683
+ sentry-sdk==1.45.0
684
+ setproctitle==1.3.3
685
+ setuptools-git==1.2
686
+ setuptools-scm==8.0.4
687
+ setuptools==69.0.3
688
+ shap==0.44.1
689
+ shapely==2.0.4
690
+ shellingham==1.5.4
691
+ simpervisor==1.0.0
692
+ simplejson==3.19.2
693
+ six==1.16.0
694
+ sklearn-pandas==2.2.0
695
+ slicer==0.0.7
696
+ smart-open==6.4.0
697
+ smmap==5.0.1
698
+ sniffio==1.3.0
699
+ snowballstemmer==2.2.0
700
+ snuggs==1.4.7
701
+ sortedcontainers==2.4.0
702
+ soundfile==0.12.1
703
+ soupsieve==2.5
704
+ soxr==0.3.7
705
+ spacy-legacy==3.0.12
706
+ spacy-loggers==1.0.5
707
+ spacy==3.7.3
708
+ spaghetti==1.7.5.post1
709
+ spectral==0.23.1
710
+ spglm==1.1.0
711
+ sphinx-rtd-theme==0.2.4
712
+ spint==1.0.7
713
+ splot==1.1.5.post1
714
+ spopt==0.6.0
715
+ spreg==1.4.2
716
+ spvcm==0.3.0
717
+ sqlparse==0.4.4
718
+ squarify==0.4.3
719
+ srsly==2.4.8
720
+ stable-baselines3==2.1.0
721
+ stack-data==0.6.2
722
+ stack-data==0.6.3
723
+ stanio==0.5.0
724
+ starlette==0.32.0.post1
725
+ statsmodels==0.14.1
726
+ stemming==1.0.1
727
+ stop-words==2018.7.23
728
+ stopit==1.1.2
729
+ stumpy==1.12.0
730
+ sympy==1.12
731
+ tables==3.9.2
732
+ tabulate==0.9.0
733
+ tangled-up-in-unicode==0.2.0
734
+ tbb==2021.12.0
735
+ tblib==3.0.0
736
+ tenacity==8.2.3
737
+ tensorboard-data-server==0.7.2
738
+ tensorboard-plugin-profile==2.15.0
739
+ tensorboard==2.15.1
740
+ tensorboardX==2.6.2.2
741
+ tensorflow-cloud==0.1.16
742
+ tensorflow-datasets==4.9.4
743
+ tensorflow-decision-forests==1.8.1
744
+ tensorflow-estimator==2.15.0
745
+ tensorflow-hub==0.16.1
746
+ tensorflow-io-gcs-filesystem==0.35.0
747
+ tensorflow-io==0.35.0
748
+ tensorflow-metadata==0.14.0
749
+ tensorflow-probability==0.23.0
750
+ tensorflow-serving-api==2.14.1
751
+ tensorflow-text==2.15.0
752
+ tensorflow-transform==0.14.0
753
+ tensorflow==2.15.0
754
+ tensorstore==0.1.56
755
+ termcolor==2.4.0
756
+ terminado==0.18.0
757
+ testpath==0.6.0
758
+ text-unidecode==1.3
759
+ textblob==0.18.0.post0
760
+ texttable==1.7.0
761
+ tf_keras==2.15.1
762
+ tfp-nightly==0.24.0.dev0
763
+ thinc==8.2.2
764
+ threadpoolctl==3.2.0
765
+ tifffile==2023.12.9
766
+ timm==0.9.16
767
+ tinycss2==1.2.1
768
+ tobler==0.11.2
769
+ tokenizers==0.15.2
770
+ toml==0.10.2
771
+ tomli==2.0.1
772
+ tomlkit==0.12.4
773
+ toolz==0.12.1
774
+ torch==2.1.2
775
+ torchaudio==2.1.2
776
+ torchdata==0.7.1
777
+ torchinfo==1.8.0
778
+ torchmetrics==1.3.2
779
+ torchtext==0.16.2
780
+ torchvision==0.16.2
781
+ tornado==6.3.3
782
+ tqdm==4.66.1
783
+ traceml==1.0.8
784
+ traitlets==5.9.0
785
+ traittypes==0.2.1
786
+ transformers==4.39.3
787
+ treelite-runtime==3.2.0
788
+ treelite==3.2.0
789
+ truststore==0.8.0
790
+ trx-python==0.2.9
791
+ tsfresh==0.20.2
792
+ typeguard==4.1.5
793
+ typer==0.9.0
794
+ typer==0.9.4
795
+ types-python-dateutil==2.8.19.20240106
796
+ typing-inspect==0.9.0
797
+ typing-utils==0.1.0
798
+ typing_extensions==4.9.0
799
+ tzdata==2023.4
800
+ uc-micro-py==1.0.3
801
+ ucx-py==0.33.0
802
+ ujson==5.9.0
803
+ umap-learn==0.5.6
804
+ unicodedata2==15.1.0
805
+ update-checker==0.18.0
806
+ uri-template==1.3.0
807
+ uritemplate==3.0.1
808
+ urllib3==1.26.18
809
+ urllib3==2.1.0
810
+ urwid==2.6.10
811
+ urwid_readline==0.14
812
+ uvicorn==0.25.0
813
+ uvloop==0.19.0
814
+ vaex-astro==0.9.3
815
+ vaex-core==4.17.1
816
+ vaex-hdf5==0.14.1
817
+ vaex-jupyter==0.8.2
818
+ vaex-ml==0.18.3
819
+ vaex-server==0.9.0
820
+ vaex-viz==0.5.4
821
+ vaex==4.17.0
822
+ vec_noise==1.1.4
823
+ vecstack==0.4.0
824
+ virtualenv==20.21.0
825
+ visions==0.7.5
826
+ vowpalwabbit==9.9.0
827
+ vtk==9.3.0
828
+ wandb==0.16.6
829
+ wasabi==1.1.2
830
+ watchfiles==0.21.0
831
+ wavio==0.0.8
832
+ wcwidth==0.2.13
833
+ weasel==0.3.4
834
+ webcolors==1.13
835
+ webencodings==0.5.1
836
+ websocket-client==1.7.0
837
+ websockets==12.0
838
+ wfdb==4.1.2
839
+ whatthepatch==1.0.5
840
+ wheel==0.42.0
841
+ widgetsnbextension==3.6.6
842
+ witwidget==1.8.1
843
+ woodwork==0.30.0
844
+ wordcloud==1.9.3
845
+ wordsegment==1.3.1
846
+ wrapt==1.14.1
847
+ xarray-einstats==0.7.0
848
+ xarray==2024.3.0
849
+ xgboost==2.0.3
850
+ xvfbwrapper==0.2.9
851
+ xxhash==3.4.1
852
+ xyzservices==2024.4.0
853
+ y-py==0.6.2
854
+ yapf==0.40.2
855
+ yarl==1.9.3
856
+ yarl==1.9.4
857
+ ydata-profiling==4.6.4
858
+ yellowbrick==1.5
859
+ ypy-websocket==0.8.4
860
+ zict==3.0.0
861
+ zipp==3.17.0
862
+ zstandard==0.22.0
wandb/run-20240524_202737-n1w0kmmv/files/wandb-metadata.json ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "os": "Linux-5.15.133+-x86_64-with-glibc2.31",
3
+ "python": "3.10.13",
4
+ "heartbeatAt": "2024-05-24T20:27:38.540904",
5
+ "startedAt": "2024-05-24T20:27:37.415317",
6
+ "docker": null,
7
+ "cuda": null,
8
+ "args": [],
9
+ "state": "running",
10
+ "program": "kaggle.ipynb",
11
+ "codePathLocal": null,
12
+ "root": "/kaggle/working",
13
+ "host": "890e8b3ca76b",
14
+ "username": "root",
15
+ "executable": "/opt/conda/bin/python3.10",
16
+ "cpu_count": 2,
17
+ "cpu_count_logical": 4,
18
+ "cpu_freq": {
19
+ "current": 2000.17,
20
+ "min": 0.0,
21
+ "max": 0.0
22
+ },
23
+ "cpu_freq_per_core": [
24
+ {
25
+ "current": 2000.17,
26
+ "min": 0.0,
27
+ "max": 0.0
28
+ },
29
+ {
30
+ "current": 2000.17,
31
+ "min": 0.0,
32
+ "max": 0.0
33
+ },
34
+ {
35
+ "current": 2000.17,
36
+ "min": 0.0,
37
+ "max": 0.0
38
+ },
39
+ {
40
+ "current": 2000.17,
41
+ "min": 0.0,
42
+ "max": 0.0
43
+ }
44
+ ],
45
+ "disk": {
46
+ "/": {
47
+ "total": 8062.387607574463,
48
+ "used": 5599.177722930908
49
+ }
50
+ },
51
+ "gpu": "Tesla P100-PCIE-16GB",
52
+ "gpu_count": 1,
53
+ "gpu_devices": [
54
+ {
55
+ "name": "Tesla P100-PCIE-16GB",
56
+ "memory_total": 17179869184
57
+ }
58
+ ],
59
+ "memory": {
60
+ "total": 31.357559204101562
61
+ }
62
+ }
wandb/run-20240524_202737-n1w0kmmv/files/wandb-summary.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"train/loss": 8079.875, "train/grad_norm": NaN, "train/learning_rate": 3.6e-06, "train/epoch": 11.36, "train/global_step": 500, "_timestamp": 1716584600.8230615, "_runtime": 2143.3996374607086, "_step": 1, "eval/loss": NaN, "eval/wer": 0.9984338292873923, "eval/runtime": 10.1243, "eval/samples_per_second": 19.754, "eval/steps_per_second": 2.469}
wandb/run-20240524_202737-n1w0kmmv/logs/debug-internal.log ADDED
The diff for this file is too large to render. See raw diff
 
wandb/run-20240524_202737-n1w0kmmv/logs/debug.log ADDED
@@ -0,0 +1,217 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ 2024-05-24 20:27:37,417 INFO MainThread:34 [wandb_setup.py:_flush():76] Current SDK version is 0.16.6
2
+ 2024-05-24 20:27:37,417 INFO MainThread:34 [wandb_setup.py:_flush():76] Configure stats pid to 34
3
+ 2024-05-24 20:27:37,417 INFO MainThread:34 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings
4
+ 2024-05-24 20:27:37,417 INFO MainThread:34 [wandb_setup.py:_flush():76] Loading settings from /kaggle/working/wandb/settings
5
+ 2024-05-24 20:27:37,417 INFO MainThread:34 [wandb_setup.py:_flush():76] Loading settings from environment variables: {}
6
+ 2024-05-24 20:27:37,417 INFO MainThread:34 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False}
7
+ 2024-05-24 20:27:37,417 INFO MainThread:34 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program': '<python with no main file>'}
8
+ 2024-05-24 20:27:37,417 INFO MainThread:34 [wandb_setup.py:_flush():76] Applying login settings: {}
9
+ 2024-05-24 20:27:37,417 INFO MainThread:34 [wandb_setup.py:_flush():76] Applying login settings: {'api_key': '***REDACTED***'}
10
+ 2024-05-24 20:27:37,417 INFO MainThread:34 [wandb_init.py:_log_setup():521] Logging user logs to /kaggle/working/wandb/run-20240524_202737-n1w0kmmv/logs/debug.log
11
+ 2024-05-24 20:27:37,417 INFO MainThread:34 [wandb_init.py:_log_setup():522] Logging internal logs to /kaggle/working/wandb/run-20240524_202737-n1w0kmmv/logs/debug-internal.log
12
+ 2024-05-24 20:27:37,417 INFO MainThread:34 [wandb_init.py:_jupyter_setup():467] configuring jupyter hooks <wandb.sdk.wandb_init._WandbInit object at 0x785a802b9120>
13
+ 2024-05-24 20:27:37,418 INFO MainThread:34 [wandb_init.py:init():561] calling init triggers
14
+ 2024-05-24 20:27:37,418 INFO MainThread:34 [wandb_init.py:init():568] wandb.init called with sweep_config: {}
15
+ config: {}
16
+ 2024-05-24 20:27:37,418 INFO MainThread:34 [wandb_init.py:init():611] starting backend
17
+ 2024-05-24 20:27:37,418 INFO MainThread:34 [wandb_init.py:init():615] setting up manager
18
+ 2024-05-24 20:27:37,420 INFO MainThread:34 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn
19
+ 2024-05-24 20:27:37,423 INFO MainThread:34 [wandb_init.py:init():623] backend started and connected
20
+ 2024-05-24 20:27:37,434 INFO MainThread:34 [wandb_run.py:_label_probe_notebook():1299] probe notebook
21
+ 2024-05-24 20:27:38,191 INFO MainThread:34 [wandb_init.py:init():715] updated telemetry
22
+ 2024-05-24 20:27:38,195 INFO MainThread:34 [wandb_init.py:init():748] communicating run to backend with 90.0 second timeout
23
+ 2024-05-24 20:27:38,448 INFO MainThread:34 [wandb_run.py:_on_init():2357] communicating current version
24
+ 2024-05-24 20:27:38,512 INFO MainThread:34 [wandb_run.py:_on_init():2366] got version response upgrade_message: "wandb version 0.17.0 is available! To upgrade, please run:\n $ pip install wandb --upgrade"
25
+
26
+ 2024-05-24 20:27:38,514 INFO MainThread:34 [wandb_init.py:init():799] starting run threads in backend
27
+ 2024-05-24 20:27:54,627 INFO MainThread:34 [wandb_run.py:_console_start():2335] atexit reg
28
+ 2024-05-24 20:27:54,628 INFO MainThread:34 [wandb_run.py:_redirect():2190] redirect: wrap_raw
29
+ 2024-05-24 20:27:54,628 INFO MainThread:34 [wandb_run.py:_redirect():2255] Wrapping output streams.
30
+ 2024-05-24 20:27:54,629 INFO MainThread:34 [wandb_run.py:_redirect():2280] Redirects installed.
31
+ 2024-05-24 20:27:54,630 INFO MainThread:34 [wandb_init.py:init():842] run started, returning control to user process
32
+ 2024-05-24 20:27:54,637 INFO MainThread:34 [wandb_run.py:_config_callback():1347] config_cb None None {'return_dict': True, 'output_hidden_states': False, 'output_attentions': False, 'torchscript': False, 'torch_dtype': None, 'use_bfloat16': False, 'tf_legacy_loss': False, 'pruned_heads': {}, 'tie_word_embeddings': True, 'chunk_size_feed_forward': 0, 'is_encoder_decoder': False, 'is_decoder': False, 'cross_attention_hidden_size': None, 'add_cross_attention': False, 'tie_encoder_decoder': False, 'max_length': 20, 'min_length': 0, 'do_sample': False, 'early_stopping': False, 'num_beams': 1, 'num_beam_groups': 1, 'diversity_penalty': 0.0, 'temperature': 1.0, 'top_k': 50, 'top_p': 1.0, 'typical_p': 1.0, 'repetition_penalty': 1.0, 'length_penalty': 1.0, 'no_repeat_ngram_size': 0, 'encoder_no_repeat_ngram_size': 0, 'bad_words_ids': None, 'num_return_sequences': 1, 'output_scores': False, 'return_dict_in_generate': False, 'forced_bos_token_id': None, 'forced_eos_token_id': None, 'remove_invalid_values': False, 'exponential_decay_length_penalty': None, 'suppress_tokens': None, 'begin_suppress_tokens': None, 'architectures': ['Wav2Vec2ForPreTraining'], 'finetuning_task': None, 'id2label': {0: 'LABEL_0', 1: 'LABEL_1'}, 'label2id': {'LABEL_0': 0, 'LABEL_1': 1}, 'tokenizer_class': None, 'prefix': None, 'bos_token_id': 1, 'pad_token_id': 0, 'eos_token_id': 2, 'sep_token_id': None, 'decoder_start_token_id': None, 'task_specific_params': None, 'problem_type': None, '_name_or_path': 'facebook/wav2vec2-base', 'transformers_version': '4.39.3', 'freeze_feat_extract_train': True, 'mask_channel_length': 10, 'mask_channel_min_space': 1, 'mask_channel_other': 0.0, 'mask_channel_prob': 0.0, 'mask_channel_selection': 'static', 'mask_time_min_space': 1, 'mask_time_other': 0.0, 'mask_time_selection': 'static', 'model_type': 'wav2vec2', 'no_mask_channel_overlap': False, 'no_mask_time_overlap': False, 'num_feat_extract_layers': 7, 'hidden_size': 768, 'feat_extract_norm': 'group', 
'feat_extract_activation': 'gelu', 'conv_dim': [512, 512, 512, 512, 512, 512, 512], 'conv_stride': [5, 2, 2, 2, 2, 2, 2], 'conv_kernel': [10, 3, 3, 3, 3, 2, 2], 'conv_bias': False, 'num_conv_pos_embeddings': 128, 'num_conv_pos_embedding_groups': 16, 'num_hidden_layers': 12, 'intermediate_size': 3072, 'hidden_act': 'gelu', 'num_attention_heads': 12, 'hidden_dropout': 0.1, 'attention_dropout': 0.1, 'activation_dropout': 0.0, 'feat_proj_dropout': 0.1, 'final_dropout': 0.0, 'layerdrop': 0.0, 'layer_norm_eps': 1e-05, 'initializer_range': 0.02, 'vocab_size': 32, 'do_stable_layer_norm': False, 'use_weighted_layer_sum': False, 'apply_spec_augment': True, 'mask_time_prob': 0.05, 'mask_time_length': 10, 'mask_time_min_masks': 2, 'mask_feature_prob': 0.0, 'mask_feature_length': 10, 'mask_feature_min_masks': 0, 'num_codevectors_per_group': 320, 'num_codevector_groups': 2, 'contrastive_logits_temperature': 0.1, 'feat_quantizer_dropout': 0.0, 'num_negatives': 100, 'codevector_dim': 256, 'proj_codevector_dim': 256, 'diversity_loss_weight': 0.1, 'ctc_loss_reduction': 'sum', 'ctc_zero_infinity': False, 'add_adapter': False, 'adapter_kernel_size': 3, 'adapter_stride': 2, 'num_adapter_layers': 3, 'output_hidden_size': 768, 'adapter_attn_dim': None, 'classifier_proj_size': 256, 'tdnn_dim': [512, 512, 512, 512, 1500], 'tdnn_kernel': [5, 3, 3, 1, 1], 'tdnn_dilation': [1, 2, 3, 1, 1], 'xvector_output_dim': 512, 'output_dir': '/kaggle/working/', 'overwrite_output_dir': False, 'do_train': False, 'do_eval': True, 'do_predict': False, 'evaluation_strategy': 'steps', 'prediction_loss_only': False, 'per_device_train_batch_size': 32, 'per_device_eval_batch_size': 8, 'per_gpu_train_batch_size': None, 'per_gpu_eval_batch_size': None, 'gradient_accumulation_steps': 1, 'eval_accumulation_steps': None, 'eval_delay': 0, 'learning_rate': 0.0001, 'weight_decay': 0.005, 'adam_beta1': 0.9, 'adam_beta2': 0.999, 'adam_epsilon': 1e-08, 'max_grad_norm': 1.0, 'num_train_epochs': 30, 'max_steps': -1, 
'lr_scheduler_type': 'linear', 'lr_scheduler_kwargs': {}, 'warmup_ratio': 0.0, 'warmup_steps': 1000, 'log_level': 'passive', 'log_level_replica': 'warning', 'log_on_each_node': True, 'logging_dir': '/kaggle/working/runs/May24_20-24-49_890e8b3ca76b', 'logging_strategy': 'steps', 'logging_first_step': False, 'logging_steps': 500, 'logging_nan_inf_filter': True, 'save_strategy': 'steps', 'save_steps': 500, 'save_total_limit': 2, 'save_safetensors': True, 'save_on_each_node': False, 'save_only_model': False, 'no_cuda': False, 'use_cpu': False, 'use_mps_device': False, 'seed': 42, 'data_seed': None, 'jit_mode_eval': False, 'use_ipex': False, 'bf16': False, 'fp16': True, 'fp16_opt_level': 'O1', 'half_precision_backend': 'auto', 'bf16_full_eval': False, 'fp16_full_eval': False, 'tf32': None, 'local_rank': 0, 'ddp_backend': None, 'tpu_num_cores': None, 'tpu_metrics_debug': False, 'debug': [], 'dataloader_drop_last': False, 'eval_steps': 500, 'dataloader_num_workers': 0, 'dataloader_prefetch_factor': None, 'past_index': -1, 'run_name': '/kaggle/working/', 'disable_tqdm': False, 'remove_unused_columns': True, 'label_names': None, 'load_best_model_at_end': False, 'metric_for_best_model': None, 'greater_is_better': None, 'ignore_data_skip': False, 'fsdp': [], 'fsdp_min_num_params': 0, 'fsdp_config': {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, 'fsdp_transformer_layer_cls_to_wrap': None, 'accelerator_config': {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True}, 'deepspeed': None, 'label_smoothing_factor': 0.0, 'optim': 'adamw_torch', 'optim_args': None, 'adafactor': False, 'group_by_length': True, 'length_column_name': 'length', 'report_to': ['tensorboard', 'wandb'], 'ddp_find_unused_parameters': None, 'ddp_bucket_cap_mb': None, 'ddp_broadcast_buffers': None, 'dataloader_pin_memory': True, 'dataloader_persistent_workers': False, 'skip_memory_metrics': True, 
'use_legacy_prediction_loop': False, 'push_to_hub': True, 'resume_from_checkpoint': None, 'hub_model_id': None, 'hub_strategy': 'every_save', 'hub_token': '<HUB_TOKEN>', 'hub_private_repo': False, 'hub_always_push': False, 'gradient_checkpointing': True, 'gradient_checkpointing_kwargs': None, 'include_inputs_for_metrics': False, 'fp16_backend': 'auto', 'push_to_hub_model_id': None, 'push_to_hub_organization': None, 'push_to_hub_token': '<PUSH_TO_HUB_TOKEN>', 'mp_parameters': '', 'auto_find_batch_size': False, 'full_determinism': False, 'torchdynamo': None, 'ray_scope': 'last', 'ddp_timeout': 1800, 'torch_compile': False, 'torch_compile_backend': None, 'torch_compile_mode': None, 'dispatch_batches': None, 'split_batches': None, 'include_tokens_per_second': False, 'include_num_input_tokens_seen': False, 'neftune_noise_alpha': None, 'optim_target_modules': None}
33
+ 2024-05-24 20:28:04,489 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
34
+ 2024-05-24 20:28:04,489 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
35
+ 2024-05-24 20:28:58,368 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
36
+ 2024-05-24 20:28:59,625 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
37
+ 2024-05-24 20:28:59,625 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
38
+ 2024-05-24 20:29:08,949 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
39
+ 2024-05-24 20:29:08,951 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
40
+ 2024-05-24 20:29:08,951 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
41
+ 2024-05-24 20:29:12,141 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
42
+ 2024-05-24 20:29:16,856 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
43
+ 2024-05-24 20:29:16,857 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
44
+ 2024-05-24 20:29:20,764 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
45
+ 2024-05-24 20:29:24,478 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
46
+ 2024-05-24 20:29:24,478 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
47
+ 2024-05-24 20:29:30,221 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
48
+ 2024-05-24 20:30:45,072 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
49
+ 2024-05-24 20:30:45,072 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
50
+ 2024-05-24 20:30:58,707 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
51
+ 2024-05-24 20:30:58,709 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
52
+ 2024-05-24 20:30:58,710 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
53
+ 2024-05-24 20:31:02,188 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
54
+ 2024-05-24 20:31:02,613 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
55
+ 2024-05-24 20:31:02,613 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
56
+ 2024-05-24 20:31:04,725 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
57
+ 2024-05-24 20:31:04,727 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
58
+ 2024-05-24 20:31:04,727 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
59
+ 2024-05-24 20:31:07,065 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
60
+ 2024-05-24 20:31:07,298 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
61
+ 2024-05-24 20:31:07,298 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
62
+ 2024-05-24 20:31:10,187 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
63
+ 2024-05-24 20:31:10,193 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
64
+ 2024-05-24 20:31:10,193 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
65
+ 2024-05-24 20:31:12,404 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
66
+ 2024-05-24 20:31:50,143 INFO MainThread:34 [wandb_run.py:_config_callback():1347] config_cb None None {'return_dict': True, 'output_hidden_states': False, 'output_attentions': False, 'torchscript': False, 'torch_dtype': None, 'use_bfloat16': False, 'tf_legacy_loss': False, 'pruned_heads': {}, 'tie_word_embeddings': True, 'chunk_size_feed_forward': 0, 'is_encoder_decoder': False, 'is_decoder': False, 'cross_attention_hidden_size': None, 'add_cross_attention': False, 'tie_encoder_decoder': False, 'max_length': 20, 'min_length': 0, 'do_sample': False, 'early_stopping': False, 'num_beams': 1, 'num_beam_groups': 1, 'diversity_penalty': 0.0, 'temperature': 1.0, 'top_k': 50, 'top_p': 1.0, 'typical_p': 1.0, 'repetition_penalty': 1.0, 'length_penalty': 1.0, 'no_repeat_ngram_size': 0, 'encoder_no_repeat_ngram_size': 0, 'bad_words_ids': None, 'num_return_sequences': 1, 'output_scores': False, 'return_dict_in_generate': False, 'forced_bos_token_id': None, 'forced_eos_token_id': None, 'remove_invalid_values': False, 'exponential_decay_length_penalty': None, 'suppress_tokens': None, 'begin_suppress_tokens': None, 'architectures': ['Wav2Vec2ForPreTraining'], 'finetuning_task': None, 'id2label': {0: 'LABEL_0', 1: 'LABEL_1'}, 'label2id': {'LABEL_0': 0, 'LABEL_1': 1}, 'tokenizer_class': None, 'prefix': None, 'bos_token_id': 1, 'pad_token_id': 0, 'eos_token_id': 2, 'sep_token_id': None, 'decoder_start_token_id': None, 'task_specific_params': None, 'problem_type': None, '_name_or_path': 'facebook/wav2vec2-base', 'transformers_version': '4.39.3', 'freeze_feat_extract_train': True, 'mask_channel_length': 10, 'mask_channel_min_space': 1, 'mask_channel_other': 0.0, 'mask_channel_prob': 0.0, 'mask_channel_selection': 'static', 'mask_time_min_space': 1, 'mask_time_other': 0.0, 'mask_time_selection': 'static', 'model_type': 'wav2vec2', 'no_mask_channel_overlap': False, 'no_mask_time_overlap': False, 'num_feat_extract_layers': 7, 'hidden_size': 768, 'feat_extract_norm': 'group', 
'feat_extract_activation': 'gelu', 'conv_dim': [512, 512, 512, 512, 512, 512, 512], 'conv_stride': [5, 2, 2, 2, 2, 2, 2], 'conv_kernel': [10, 3, 3, 3, 3, 2, 2], 'conv_bias': False, 'num_conv_pos_embeddings': 128, 'num_conv_pos_embedding_groups': 16, 'num_hidden_layers': 12, 'intermediate_size': 3072, 'hidden_act': 'gelu', 'num_attention_heads': 12, 'hidden_dropout': 0.1, 'attention_dropout': 0.1, 'activation_dropout': 0.0, 'feat_proj_dropout': 0.1, 'final_dropout': 0.0, 'layerdrop': 0.0, 'layer_norm_eps': 1e-05, 'initializer_range': 0.02, 'vocab_size': 32, 'do_stable_layer_norm': False, 'use_weighted_layer_sum': False, 'apply_spec_augment': True, 'mask_time_prob': 0.05, 'mask_time_length': 10, 'mask_time_min_masks': 2, 'mask_feature_prob': 0.0, 'mask_feature_length': 10, 'mask_feature_min_masks': 0, 'num_codevectors_per_group': 320, 'num_codevector_groups': 2, 'contrastive_logits_temperature': 0.1, 'feat_quantizer_dropout': 0.0, 'num_negatives': 100, 'codevector_dim': 256, 'proj_codevector_dim': 256, 'diversity_loss_weight': 0.1, 'ctc_loss_reduction': 'sum', 'ctc_zero_infinity': False, 'add_adapter': False, 'adapter_kernel_size': 3, 'adapter_stride': 2, 'num_adapter_layers': 3, 'output_hidden_size': 768, 'adapter_attn_dim': None, 'classifier_proj_size': 256, 'tdnn_dim': [512, 512, 512, 512, 1500], 'tdnn_kernel': [5, 3, 3, 1, 1], 'tdnn_dilation': [1, 2, 3, 1, 1], 'xvector_output_dim': 512, 'output_dir': '/kaggle/working/', 'overwrite_output_dir': False, 'do_train': False, 'do_eval': True, 'do_predict': False, 'evaluation_strategy': 'steps', 'prediction_loss_only': False, 'per_device_train_batch_size': 32, 'per_device_eval_batch_size': 8, 'per_gpu_train_batch_size': None, 'per_gpu_eval_batch_size': None, 'gradient_accumulation_steps': 1, 'eval_accumulation_steps': None, 'eval_delay': 0, 'learning_rate': 0.0001, 'weight_decay': 0.005, 'adam_beta1': 0.9, 'adam_beta2': 0.999, 'adam_epsilon': 1e-08, 'max_grad_norm': 1.0, 'num_train_epochs': 30, 'max_steps': -1, 
'lr_scheduler_type': 'linear', 'lr_scheduler_kwargs': {}, 'warmup_ratio': 0.0, 'warmup_steps': 1000, 'log_level': 'passive', 'log_level_replica': 'warning', 'log_on_each_node': True, 'logging_dir': '/kaggle/working/runs/May24_20-31-07_890e8b3ca76b', 'logging_strategy': 'steps', 'logging_first_step': False, 'logging_steps': 500, 'logging_nan_inf_filter': True, 'save_strategy': 'steps', 'save_steps': 500, 'save_total_limit': 2, 'save_safetensors': True, 'save_on_each_node': False, 'save_only_model': False, 'no_cuda': False, 'use_cpu': False, 'use_mps_device': False, 'seed': 42, 'data_seed': None, 'jit_mode_eval': False, 'use_ipex': False, 'bf16': False, 'fp16': True, 'fp16_opt_level': 'O1', 'half_precision_backend': 'auto', 'bf16_full_eval': False, 'fp16_full_eval': False, 'tf32': None, 'local_rank': 0, 'ddp_backend': None, 'tpu_num_cores': None, 'tpu_metrics_debug': False, 'debug': [], 'dataloader_drop_last': False, 'eval_steps': 500, 'dataloader_num_workers': 0, 'dataloader_prefetch_factor': None, 'past_index': -1, 'run_name': '/kaggle/working/', 'disable_tqdm': False, 'remove_unused_columns': True, 'label_names': None, 'load_best_model_at_end': False, 'metric_for_best_model': None, 'greater_is_better': None, 'ignore_data_skip': False, 'fsdp': [], 'fsdp_min_num_params': 0, 'fsdp_config': {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, 'fsdp_transformer_layer_cls_to_wrap': None, 'accelerator_config': {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True}, 'deepspeed': None, 'label_smoothing_factor': 0.0, 'optim': 'adamw_torch', 'optim_args': None, 'adafactor': False, 'group_by_length': True, 'length_column_name': 'length', 'report_to': ['tensorboard', 'wandb'], 'ddp_find_unused_parameters': None, 'ddp_bucket_cap_mb': None, 'ddp_broadcast_buffers': None, 'dataloader_pin_memory': True, 'dataloader_persistent_workers': False, 'skip_memory_metrics': True, 
'use_legacy_prediction_loop': False, 'push_to_hub': True, 'resume_from_checkpoint': None, 'hub_model_id': None, 'hub_strategy': 'every_save', 'hub_token': '<HUB_TOKEN>', 'hub_private_repo': False, 'hub_always_push': False, 'gradient_checkpointing': True, 'gradient_checkpointing_kwargs': None, 'include_inputs_for_metrics': False, 'fp16_backend': 'auto', 'push_to_hub_model_id': None, 'push_to_hub_organization': None, 'push_to_hub_token': '<PUSH_TO_HUB_TOKEN>', 'mp_parameters': '', 'auto_find_batch_size': False, 'full_determinism': False, 'torchdynamo': None, 'ray_scope': 'last', 'ddp_timeout': 1800, 'torch_compile': False, 'torch_compile_backend': None, 'torch_compile_mode': None, 'dispatch_batches': None, 'split_batches': None, 'include_tokens_per_second': False, 'include_num_input_tokens_seen': False, 'neftune_noise_alpha': None, 'optim_target_modules': None}
67
+ 2024-05-24 20:31:58,626 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
68
+ 2024-05-24 20:31:58,626 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
69
+ 2024-05-24 20:34:06,571 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
70
+ 2024-05-24 20:34:06,717 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
71
+ 2024-05-24 20:34:06,717 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
72
+ 2024-05-24 20:34:10,001 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
73
+ 2024-05-24 20:34:46,464 INFO MainThread:34 [wandb_run.py:_config_callback():1347] config_cb None None {'return_dict': True, 'output_hidden_states': False, 'output_attentions': False, 'torchscript': False, 'torch_dtype': None, 'use_bfloat16': False, 'tf_legacy_loss': False, 'pruned_heads': {}, 'tie_word_embeddings': True, 'chunk_size_feed_forward': 0, 'is_encoder_decoder': False, 'is_decoder': False, 'cross_attention_hidden_size': None, 'add_cross_attention': False, 'tie_encoder_decoder': False, 'max_length': 20, 'min_length': 0, 'do_sample': False, 'early_stopping': False, 'num_beams': 1, 'num_beam_groups': 1, 'diversity_penalty': 0.0, 'temperature': 1.0, 'top_k': 50, 'top_p': 1.0, 'typical_p': 1.0, 'repetition_penalty': 1.0, 'length_penalty': 1.0, 'no_repeat_ngram_size': 0, 'encoder_no_repeat_ngram_size': 0, 'bad_words_ids': None, 'num_return_sequences': 1, 'output_scores': False, 'return_dict_in_generate': False, 'forced_bos_token_id': None, 'forced_eos_token_id': None, 'remove_invalid_values': False, 'exponential_decay_length_penalty': None, 'suppress_tokens': None, 'begin_suppress_tokens': None, 'architectures': ['Wav2Vec2ForPreTraining'], 'finetuning_task': None, 'id2label': {0: 'LABEL_0', 1: 'LABEL_1'}, 'label2id': {'LABEL_0': 0, 'LABEL_1': 1}, 'tokenizer_class': None, 'prefix': None, 'bos_token_id': 1, 'pad_token_id': 0, 'eos_token_id': 2, 'sep_token_id': None, 'decoder_start_token_id': None, 'task_specific_params': None, 'problem_type': None, '_name_or_path': 'facebook/wav2vec2-base', 'transformers_version': '4.39.3', 'freeze_feat_extract_train': True, 'mask_channel_length': 10, 'mask_channel_min_space': 1, 'mask_channel_other': 0.0, 'mask_channel_prob': 0.0, 'mask_channel_selection': 'static', 'mask_time_min_space': 1, 'mask_time_other': 0.0, 'mask_time_selection': 'static', 'model_type': 'wav2vec2', 'no_mask_channel_overlap': False, 'no_mask_time_overlap': False, 'num_feat_extract_layers': 7, 'hidden_size': 768, 'feat_extract_norm': 'group', 
'feat_extract_activation': 'gelu', 'conv_dim': [512, 512, 512, 512, 512, 512, 512], 'conv_stride': [5, 2, 2, 2, 2, 2, 2], 'conv_kernel': [10, 3, 3, 3, 3, 2, 2], 'conv_bias': False, 'num_conv_pos_embeddings': 128, 'num_conv_pos_embedding_groups': 16, 'num_hidden_layers': 12, 'intermediate_size': 3072, 'hidden_act': 'gelu', 'num_attention_heads': 12, 'hidden_dropout': 0.1, 'attention_dropout': 0.1, 'activation_dropout': 0.0, 'feat_proj_dropout': 0.1, 'final_dropout': 0.0, 'layerdrop': 0.0, 'layer_norm_eps': 1e-05, 'initializer_range': 0.02, 'vocab_size': 32, 'do_stable_layer_norm': False, 'use_weighted_layer_sum': False, 'apply_spec_augment': True, 'mask_time_prob': 0.05, 'mask_time_length': 10, 'mask_time_min_masks': 2, 'mask_feature_prob': 0.0, 'mask_feature_length': 10, 'mask_feature_min_masks': 0, 'num_codevectors_per_group': 320, 'num_codevector_groups': 2, 'contrastive_logits_temperature': 0.1, 'feat_quantizer_dropout': 0.0, 'num_negatives': 100, 'codevector_dim': 256, 'proj_codevector_dim': 256, 'diversity_loss_weight': 0.1, 'ctc_loss_reduction': 'sum', 'ctc_zero_infinity': False, 'add_adapter': False, 'adapter_kernel_size': 3, 'adapter_stride': 2, 'num_adapter_layers': 3, 'output_hidden_size': 768, 'adapter_attn_dim': None, 'classifier_proj_size': 256, 'tdnn_dim': [512, 512, 512, 512, 1500], 'tdnn_kernel': [5, 3, 3, 1, 1], 'tdnn_dilation': [1, 2, 3, 1, 1], 'xvector_output_dim': 512, 'output_dir': '/kaggle/working/', 'overwrite_output_dir': False, 'do_train': False, 'do_eval': True, 'do_predict': False, 'evaluation_strategy': 'steps', 'prediction_loss_only': False, 'per_device_train_batch_size': 64, 'per_device_eval_batch_size': 8, 'per_gpu_train_batch_size': None, 'per_gpu_eval_batch_size': None, 'gradient_accumulation_steps': 1, 'eval_accumulation_steps': None, 'eval_delay': 0, 'learning_rate': 0.0001, 'weight_decay': 0.005, 'adam_beta1': 0.9, 'adam_beta2': 0.999, 'adam_epsilon': 1e-08, 'max_grad_norm': 1.0, 'num_train_epochs': 30, 'max_steps': -1, 
'lr_scheduler_type': 'linear', 'lr_scheduler_kwargs': {}, 'warmup_ratio': 0.0, 'warmup_steps': 1000, 'log_level': 'passive', 'log_level_replica': 'warning', 'log_on_each_node': True, 'logging_dir': '/kaggle/working/runs/May24_20-34-06_890e8b3ca76b', 'logging_strategy': 'steps', 'logging_first_step': False, 'logging_steps': 500, 'logging_nan_inf_filter': True, 'save_strategy': 'steps', 'save_steps': 500, 'save_total_limit': 2, 'save_safetensors': True, 'save_on_each_node': False, 'save_only_model': False, 'no_cuda': False, 'use_cpu': False, 'use_mps_device': False, 'seed': 42, 'data_seed': None, 'jit_mode_eval': False, 'use_ipex': False, 'bf16': False, 'fp16': True, 'fp16_opt_level': 'O1', 'half_precision_backend': 'auto', 'bf16_full_eval': False, 'fp16_full_eval': False, 'tf32': None, 'local_rank': 0, 'ddp_backend': None, 'tpu_num_cores': None, 'tpu_metrics_debug': False, 'debug': [], 'dataloader_drop_last': False, 'eval_steps': 500, 'dataloader_num_workers': 0, 'dataloader_prefetch_factor': None, 'past_index': -1, 'run_name': '/kaggle/working/', 'disable_tqdm': False, 'remove_unused_columns': True, 'label_names': None, 'load_best_model_at_end': False, 'metric_for_best_model': None, 'greater_is_better': None, 'ignore_data_skip': False, 'fsdp': [], 'fsdp_min_num_params': 0, 'fsdp_config': {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, 'fsdp_transformer_layer_cls_to_wrap': None, 'accelerator_config': {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True}, 'deepspeed': None, 'label_smoothing_factor': 0.0, 'optim': 'adamw_torch', 'optim_args': None, 'adafactor': False, 'group_by_length': True, 'length_column_name': 'length', 'report_to': ['tensorboard', 'wandb'], 'ddp_find_unused_parameters': None, 'ddp_bucket_cap_mb': None, 'ddp_broadcast_buffers': None, 'dataloader_pin_memory': True, 'dataloader_persistent_workers': False, 'skip_memory_metrics': True, 
'use_legacy_prediction_loop': False, 'push_to_hub': True, 'resume_from_checkpoint': None, 'hub_model_id': None, 'hub_strategy': 'every_save', 'hub_token': '<HUB_TOKEN>', 'hub_private_repo': False, 'hub_always_push': False, 'gradient_checkpointing': True, 'gradient_checkpointing_kwargs': None, 'include_inputs_for_metrics': False, 'fp16_backend': 'auto', 'push_to_hub_model_id': None, 'push_to_hub_organization': None, 'push_to_hub_token': '<PUSH_TO_HUB_TOKEN>', 'mp_parameters': '', 'auto_find_batch_size': False, 'full_determinism': False, 'torchdynamo': None, 'ray_scope': 'last', 'ddp_timeout': 1800, 'torch_compile': False, 'torch_compile_backend': None, 'torch_compile_mode': None, 'dispatch_batches': None, 'split_batches': None, 'include_tokens_per_second': False, 'include_num_input_tokens_seen': False, 'neftune_noise_alpha': None, 'optim_target_modules': None}
74
+ 2024-05-24 20:34:55,928 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
75
+ 2024-05-24 20:34:55,929 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
76
+ 2024-05-24 20:35:26,131 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
77
+ 2024-05-24 20:35:26,139 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
78
+ 2024-05-24 20:35:26,139 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
79
+ 2024-05-24 20:35:32,729 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
80
+ 2024-05-24 20:35:32,732 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
81
+ 2024-05-24 20:35:32,732 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
82
+ 2024-05-24 20:35:34,729 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
83
+ 2024-05-24 20:35:34,732 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
84
+ 2024-05-24 20:35:34,733 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
85
+ 2024-05-24 20:35:47,483 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
86
+ 2024-05-24 20:35:47,488 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
87
+ 2024-05-24 20:35:47,488 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
88
+ 2024-05-24 20:35:56,249 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
89
+ 2024-05-24 20:35:56,253 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
90
+ 2024-05-24 20:35:56,253 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
91
+ 2024-05-24 20:37:20,366 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
92
+ 2024-05-24 20:37:20,370 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
93
+ 2024-05-24 20:37:20,370 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
94
+ 2024-05-24 20:37:28,481 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
95
+ 2024-05-24 20:37:28,510 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
96
+ 2024-05-24 20:37:28,511 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
97
+ 2024-05-24 20:38:00,946 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
98
+ 2024-05-24 20:38:00,950 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
99
+ 2024-05-24 20:38:00,950 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
100
+ 2024-05-24 20:38:04,524 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
101
+ 2024-05-24 20:38:04,550 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
102
+ 2024-05-24 20:38:04,550 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
103
+ 2024-05-24 20:38:12,369 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
104
+ 2024-05-24 20:38:12,432 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
105
+ 2024-05-24 20:38:12,432 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
106
+ 2024-05-24 20:38:16,938 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
107
+ 2024-05-24 20:38:16,998 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
108
+ 2024-05-24 20:38:16,998 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
109
+ 2024-05-24 20:38:21,248 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
110
+ 2024-05-24 20:38:21,260 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
111
+ 2024-05-24 20:38:21,260 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
112
+ 2024-05-24 20:38:22,930 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
113
+ 2024-05-24 20:38:22,940 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
114
+ 2024-05-24 20:38:22,940 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
115
+ 2024-05-24 20:38:37,851 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
116
+ 2024-05-24 20:38:55,254 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
117
+ 2024-05-24 20:38:55,254 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
118
+ 2024-05-24 20:38:58,788 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
119
+ 2024-05-24 20:38:58,827 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
120
+ 2024-05-24 20:38:58,827 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
121
+ 2024-05-24 20:39:00,762 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
122
+ 2024-05-24 20:40:16,333 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
123
+ 2024-05-24 20:40:16,334 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
124
+ 2024-05-24 20:40:21,472 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
125
+ 2024-05-24 20:40:22,682 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
126
+ 2024-05-24 20:40:22,682 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
127
+ 2024-05-24 20:40:28,979 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
128
+ 2024-05-24 20:40:33,961 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
129
+ 2024-05-24 20:40:33,961 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
130
+ 2024-05-24 20:40:36,994 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
131
+ 2024-05-24 20:40:36,996 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
132
+ 2024-05-24 20:40:36,997 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
133
+ 2024-05-24 20:40:42,203 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
134
+ 2024-05-24 20:40:42,621 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
135
+ 2024-05-24 20:40:42,621 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
136
+ 2024-05-24 20:40:44,473 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
137
+ 2024-05-24 20:40:44,475 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
138
+ 2024-05-24 20:40:44,475 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
139
+ 2024-05-24 20:40:48,431 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
140
+ 2024-05-24 20:40:48,669 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
141
+ 2024-05-24 20:40:48,669 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
142
+ 2024-05-24 20:40:50,725 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
143
+ 2024-05-24 20:40:50,730 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
144
+ 2024-05-24 20:40:50,730 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
145
+ 2024-05-24 20:40:52,537 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
146
+ 2024-05-24 20:41:30,450 INFO MainThread:34 [wandb_run.py:_config_callback():1347] config_cb None None {'return_dict': True, 'output_hidden_states': False, 'output_attentions': False, 'torchscript': False, 'torch_dtype': None, 'use_bfloat16': False, 'tf_legacy_loss': False, 'pruned_heads': {}, 'tie_word_embeddings': True, 'chunk_size_feed_forward': 0, 'is_encoder_decoder': False, 'is_decoder': False, 'cross_attention_hidden_size': None, 'add_cross_attention': False, 'tie_encoder_decoder': False, 'max_length': 20, 'min_length': 0, 'do_sample': False, 'early_stopping': False, 'num_beams': 1, 'num_beam_groups': 1, 'diversity_penalty': 0.0, 'temperature': 1.0, 'top_k': 50, 'top_p': 1.0, 'typical_p': 1.0, 'repetition_penalty': 1.0, 'length_penalty': 1.0, 'no_repeat_ngram_size': 0, 'encoder_no_repeat_ngram_size': 0, 'bad_words_ids': None, 'num_return_sequences': 1, 'output_scores': False, 'return_dict_in_generate': False, 'forced_bos_token_id': None, 'forced_eos_token_id': None, 'remove_invalid_values': False, 'exponential_decay_length_penalty': None, 'suppress_tokens': None, 'begin_suppress_tokens': None, 'architectures': ['Wav2Vec2ForPreTraining'], 'finetuning_task': None, 'id2label': {0: 'LABEL_0', 1: 'LABEL_1'}, 'label2id': {'LABEL_0': 0, 'LABEL_1': 1}, 'tokenizer_class': None, 'prefix': None, 'bos_token_id': 1, 'pad_token_id': 0, 'eos_token_id': 2, 'sep_token_id': None, 'decoder_start_token_id': None, 'task_specific_params': None, 'problem_type': None, '_name_or_path': 'facebook/wav2vec2-base', 'transformers_version': '4.39.3', 'freeze_feat_extract_train': True, 'mask_channel_length': 10, 'mask_channel_min_space': 1, 'mask_channel_other': 0.0, 'mask_channel_prob': 0.0, 'mask_channel_selection': 'static', 'mask_time_min_space': 1, 'mask_time_other': 0.0, 'mask_time_selection': 'static', 'model_type': 'wav2vec2', 'no_mask_channel_overlap': False, 'no_mask_time_overlap': False, 'num_feat_extract_layers': 7, 'hidden_size': 768, 'feat_extract_norm': 'group', 
'feat_extract_activation': 'gelu', 'conv_dim': [512, 512, 512, 512, 512, 512, 512], 'conv_stride': [5, 2, 2, 2, 2, 2, 2], 'conv_kernel': [10, 3, 3, 3, 3, 2, 2], 'conv_bias': False, 'num_conv_pos_embeddings': 128, 'num_conv_pos_embedding_groups': 16, 'num_hidden_layers': 12, 'intermediate_size': 3072, 'hidden_act': 'gelu', 'num_attention_heads': 12, 'hidden_dropout': 0.1, 'attention_dropout': 0.1, 'activation_dropout': 0.0, 'feat_proj_dropout': 0.1, 'final_dropout': 0.0, 'layerdrop': 0.0, 'layer_norm_eps': 1e-05, 'initializer_range': 0.02, 'vocab_size': 32, 'do_stable_layer_norm': False, 'use_weighted_layer_sum': False, 'apply_spec_augment': True, 'mask_time_prob': 0.05, 'mask_time_length': 10, 'mask_time_min_masks': 2, 'mask_feature_prob': 0.0, 'mask_feature_length': 10, 'mask_feature_min_masks': 0, 'num_codevectors_per_group': 320, 'num_codevector_groups': 2, 'contrastive_logits_temperature': 0.1, 'feat_quantizer_dropout': 0.0, 'num_negatives': 100, 'codevector_dim': 256, 'proj_codevector_dim': 256, 'diversity_loss_weight': 0.1, 'ctc_loss_reduction': 'sum', 'ctc_zero_infinity': False, 'add_adapter': False, 'adapter_kernel_size': 3, 'adapter_stride': 2, 'num_adapter_layers': 3, 'output_hidden_size': 768, 'adapter_attn_dim': None, 'classifier_proj_size': 256, 'tdnn_dim': [512, 512, 512, 512, 1500], 'tdnn_kernel': [5, 3, 3, 1, 1], 'tdnn_dilation': [1, 2, 3, 1, 1], 'xvector_output_dim': 512, 'output_dir': '/kaggle/working/', 'overwrite_output_dir': False, 'do_train': False, 'do_eval': True, 'do_predict': False, 'evaluation_strategy': 'steps', 'prediction_loss_only': False, 'per_device_train_batch_size': 64, 'per_device_eval_batch_size': 8, 'per_gpu_train_batch_size': None, 'per_gpu_eval_batch_size': None, 'gradient_accumulation_steps': 1, 'eval_accumulation_steps': None, 'eval_delay': 0, 'learning_rate': 0.0001, 'weight_decay': 0.005, 'adam_beta1': 0.9, 'adam_beta2': 0.999, 'adam_epsilon': 1e-08, 'max_grad_norm': 1.0, 'num_train_epochs': 30, 'max_steps': -1, 
'lr_scheduler_type': 'linear', 'lr_scheduler_kwargs': {}, 'warmup_ratio': 0.0, 'warmup_steps': 1000, 'log_level': 'passive', 'log_level_replica': 'warning', 'log_on_each_node': True, 'logging_dir': '/kaggle/working/runs/May24_20-40-48_890e8b3ca76b', 'logging_strategy': 'steps', 'logging_first_step': False, 'logging_steps': 500, 'logging_nan_inf_filter': True, 'save_strategy': 'steps', 'save_steps': 500, 'save_total_limit': 2, 'save_safetensors': True, 'save_on_each_node': False, 'save_only_model': False, 'no_cuda': False, 'use_cpu': False, 'use_mps_device': False, 'seed': 42, 'data_seed': None, 'jit_mode_eval': False, 'use_ipex': False, 'bf16': False, 'fp16': True, 'fp16_opt_level': 'O1', 'half_precision_backend': 'auto', 'bf16_full_eval': False, 'fp16_full_eval': False, 'tf32': None, 'local_rank': 0, 'ddp_backend': None, 'tpu_num_cores': None, 'tpu_metrics_debug': False, 'debug': [], 'dataloader_drop_last': False, 'eval_steps': 500, 'dataloader_num_workers': 0, 'dataloader_prefetch_factor': None, 'past_index': -1, 'run_name': '/kaggle/working/', 'disable_tqdm': False, 'remove_unused_columns': True, 'label_names': None, 'load_best_model_at_end': False, 'metric_for_best_model': None, 'greater_is_better': None, 'ignore_data_skip': False, 'fsdp': [], 'fsdp_min_num_params': 0, 'fsdp_config': {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, 'fsdp_transformer_layer_cls_to_wrap': None, 'accelerator_config': {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True}, 'deepspeed': None, 'label_smoothing_factor': 0.0, 'optim': 'adamw_torch', 'optim_args': None, 'adafactor': False, 'group_by_length': True, 'length_column_name': 'length', 'report_to': ['tensorboard', 'wandb'], 'ddp_find_unused_parameters': None, 'ddp_bucket_cap_mb': None, 'ddp_broadcast_buffers': None, 'dataloader_pin_memory': True, 'dataloader_persistent_workers': False, 'skip_memory_metrics': True, 
'use_legacy_prediction_loop': False, 'push_to_hub': True, 'resume_from_checkpoint': None, 'hub_model_id': None, 'hub_strategy': 'every_save', 'hub_token': '<HUB_TOKEN>', 'hub_private_repo': False, 'hub_always_push': False, 'gradient_checkpointing': True, 'gradient_checkpointing_kwargs': None, 'include_inputs_for_metrics': False, 'fp16_backend': 'auto', 'push_to_hub_model_id': None, 'push_to_hub_organization': None, 'push_to_hub_token': '<PUSH_TO_HUB_TOKEN>', 'mp_parameters': '', 'auto_find_batch_size': False, 'full_determinism': False, 'torchdynamo': None, 'ray_scope': 'last', 'ddp_timeout': 1800, 'torch_compile': False, 'torch_compile_backend': None, 'torch_compile_mode': None, 'dispatch_batches': None, 'split_batches': None, 'include_tokens_per_second': False, 'include_num_input_tokens_seen': False, 'neftune_noise_alpha': None, 'optim_target_modules': None}
147
+ 2024-05-24 20:41:39,043 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
148
+ 2024-05-24 20:41:39,044 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
149
+ 2024-05-24 20:42:26,950 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
150
+ 2024-05-24 20:42:27,128 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
151
+ 2024-05-24 20:42:27,128 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
152
+ 2024-05-24 20:42:29,871 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
153
+ 2024-05-24 20:43:07,276 INFO MainThread:34 [wandb_run.py:_config_callback():1347] config_cb None None {'return_dict': True, 'output_hidden_states': False, 'output_attentions': False, 'torchscript': False, 'torch_dtype': None, 'use_bfloat16': False, 'tf_legacy_loss': False, 'pruned_heads': {}, 'tie_word_embeddings': True, 'chunk_size_feed_forward': 0, 'is_encoder_decoder': False, 'is_decoder': False, 'cross_attention_hidden_size': None, 'add_cross_attention': False, 'tie_encoder_decoder': False, 'max_length': 20, 'min_length': 0, 'do_sample': False, 'early_stopping': False, 'num_beams': 1, 'num_beam_groups': 1, 'diversity_penalty': 0.0, 'temperature': 1.0, 'top_k': 50, 'top_p': 1.0, 'typical_p': 1.0, 'repetition_penalty': 1.0, 'length_penalty': 1.0, 'no_repeat_ngram_size': 0, 'encoder_no_repeat_ngram_size': 0, 'bad_words_ids': None, 'num_return_sequences': 1, 'output_scores': False, 'return_dict_in_generate': False, 'forced_bos_token_id': None, 'forced_eos_token_id': None, 'remove_invalid_values': False, 'exponential_decay_length_penalty': None, 'suppress_tokens': None, 'begin_suppress_tokens': None, 'architectures': ['Wav2Vec2ForPreTraining'], 'finetuning_task': None, 'id2label': {0: 'LABEL_0', 1: 'LABEL_1'}, 'label2id': {'LABEL_0': 0, 'LABEL_1': 1}, 'tokenizer_class': None, 'prefix': None, 'bos_token_id': 1, 'pad_token_id': 0, 'eos_token_id': 2, 'sep_token_id': None, 'decoder_start_token_id': None, 'task_specific_params': None, 'problem_type': None, '_name_or_path': 'facebook/wav2vec2-base', 'transformers_version': '4.39.3', 'freeze_feat_extract_train': True, 'mask_channel_length': 10, 'mask_channel_min_space': 1, 'mask_channel_other': 0.0, 'mask_channel_prob': 0.0, 'mask_channel_selection': 'static', 'mask_time_min_space': 1, 'mask_time_other': 0.0, 'mask_time_selection': 'static', 'model_type': 'wav2vec2', 'no_mask_channel_overlap': False, 'no_mask_time_overlap': False, 'num_feat_extract_layers': 7, 'hidden_size': 768, 'feat_extract_norm': 'group', 
'feat_extract_activation': 'gelu', 'conv_dim': [512, 512, 512, 512, 512, 512, 512], 'conv_stride': [5, 2, 2, 2, 2, 2, 2], 'conv_kernel': [10, 3, 3, 3, 3, 2, 2], 'conv_bias': False, 'num_conv_pos_embeddings': 128, 'num_conv_pos_embedding_groups': 16, 'num_hidden_layers': 12, 'intermediate_size': 3072, 'hidden_act': 'gelu', 'num_attention_heads': 12, 'hidden_dropout': 0.1, 'attention_dropout': 0.1, 'activation_dropout': 0.0, 'feat_proj_dropout': 0.1, 'final_dropout': 0.0, 'layerdrop': 0.0, 'layer_norm_eps': 1e-05, 'initializer_range': 0.02, 'vocab_size': 32, 'do_stable_layer_norm': False, 'use_weighted_layer_sum': False, 'apply_spec_augment': True, 'mask_time_prob': 0.05, 'mask_time_length': 10, 'mask_time_min_masks': 2, 'mask_feature_prob': 0.0, 'mask_feature_length': 10, 'mask_feature_min_masks': 0, 'num_codevectors_per_group': 320, 'num_codevector_groups': 2, 'contrastive_logits_temperature': 0.1, 'feat_quantizer_dropout': 0.0, 'num_negatives': 100, 'codevector_dim': 256, 'proj_codevector_dim': 256, 'diversity_loss_weight': 0.1, 'ctc_loss_reduction': 'sum', 'ctc_zero_infinity': False, 'add_adapter': False, 'adapter_kernel_size': 3, 'adapter_stride': 2, 'num_adapter_layers': 3, 'output_hidden_size': 768, 'adapter_attn_dim': None, 'classifier_proj_size': 256, 'tdnn_dim': [512, 512, 512, 512, 1500], 'tdnn_kernel': [5, 3, 3, 1, 1], 'tdnn_dilation': [1, 2, 3, 1, 1], 'xvector_output_dim': 512, 'output_dir': '/kaggle/working/', 'overwrite_output_dir': False, 'do_train': False, 'do_eval': True, 'do_predict': False, 'evaluation_strategy': 'steps', 'prediction_loss_only': False, 'per_device_train_batch_size': 16, 'per_device_eval_batch_size': 8, 'per_gpu_train_batch_size': None, 'per_gpu_eval_batch_size': None, 'gradient_accumulation_steps': 1, 'eval_accumulation_steps': None, 'eval_delay': 0, 'learning_rate': 0.0001, 'weight_decay': 0.005, 'adam_beta1': 0.9, 'adam_beta2': 0.999, 'adam_epsilon': 1e-08, 'max_grad_norm': 1.0, 'num_train_epochs': 30, 'max_steps': -1, 
'lr_scheduler_type': 'linear', 'lr_scheduler_kwargs': {}, 'warmup_ratio': 0.0, 'warmup_steps': 1000, 'log_level': 'passive', 'log_level_replica': 'warning', 'log_on_each_node': True, 'logging_dir': '/kaggle/working/runs/May24_20-42-26_890e8b3ca76b', 'logging_strategy': 'steps', 'logging_first_step': False, 'logging_steps': 500, 'logging_nan_inf_filter': True, 'save_strategy': 'steps', 'save_steps': 500, 'save_total_limit': 2, 'save_safetensors': True, 'save_on_each_node': False, 'save_only_model': False, 'no_cuda': False, 'use_cpu': False, 'use_mps_device': False, 'seed': 42, 'data_seed': None, 'jit_mode_eval': False, 'use_ipex': False, 'bf16': False, 'fp16': True, 'fp16_opt_level': 'O1', 'half_precision_backend': 'auto', 'bf16_full_eval': False, 'fp16_full_eval': False, 'tf32': None, 'local_rank': 0, 'ddp_backend': None, 'tpu_num_cores': None, 'tpu_metrics_debug': False, 'debug': [], 'dataloader_drop_last': False, 'eval_steps': 500, 'dataloader_num_workers': 0, 'dataloader_prefetch_factor': None, 'past_index': -1, 'run_name': '/kaggle/working/', 'disable_tqdm': False, 'remove_unused_columns': True, 'label_names': None, 'load_best_model_at_end': False, 'metric_for_best_model': None, 'greater_is_better': None, 'ignore_data_skip': False, 'fsdp': [], 'fsdp_min_num_params': 0, 'fsdp_config': {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, 'fsdp_transformer_layer_cls_to_wrap': None, 'accelerator_config': {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True}, 'deepspeed': None, 'label_smoothing_factor': 0.0, 'optim': 'adamw_torch', 'optim_args': None, 'adafactor': False, 'group_by_length': True, 'length_column_name': 'length', 'report_to': ['tensorboard', 'wandb'], 'ddp_find_unused_parameters': None, 'ddp_bucket_cap_mb': None, 'ddp_broadcast_buffers': None, 'dataloader_pin_memory': True, 'dataloader_persistent_workers': False, 'skip_memory_metrics': True, 
'use_legacy_prediction_loop': False, 'push_to_hub': True, 'resume_from_checkpoint': None, 'hub_model_id': None, 'hub_strategy': 'every_save', 'hub_token': '<HUB_TOKEN>', 'hub_private_repo': False, 'hub_always_push': False, 'gradient_checkpointing': True, 'gradient_checkpointing_kwargs': None, 'include_inputs_for_metrics': False, 'fp16_backend': 'auto', 'push_to_hub_model_id': None, 'push_to_hub_organization': None, 'push_to_hub_token': '<PUSH_TO_HUB_TOKEN>', 'mp_parameters': '', 'auto_find_batch_size': False, 'full_determinism': False, 'torchdynamo': None, 'ray_scope': 'last', 'ddp_timeout': 1800, 'torch_compile': False, 'torch_compile_backend': None, 'torch_compile_mode': None, 'dispatch_batches': None, 'split_batches': None, 'include_tokens_per_second': False, 'include_num_input_tokens_seen': False, 'neftune_noise_alpha': None, 'optim_target_modules': None}
154
+ 2024-05-24 20:43:12,247 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
155
+ 2024-05-24 20:43:12,247 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
156
+ 2024-05-24 20:44:08,643 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
157
+ 2024-05-24 20:44:14,294 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
158
+ 2024-05-24 20:44:14,295 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
159
+ 2024-05-24 20:44:39,719 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
160
+ 2024-05-24 20:44:39,770 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
161
+ 2024-05-24 20:44:39,770 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
162
+ 2024-05-24 20:44:43,543 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
163
+ 2024-05-24 20:46:31,143 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
164
+ 2024-05-24 20:46:31,143 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
165
+ 2024-05-24 20:46:49,361 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
166
+ 2024-05-24 20:46:49,391 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
167
+ 2024-05-24 20:46:49,391 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
168
+ 2024-05-24 20:46:52,169 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
169
+ 2024-05-24 20:46:54,391 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
170
+ 2024-05-24 20:46:54,391 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
171
+ 2024-05-24 20:46:58,317 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
172
+ 2024-05-24 20:46:58,322 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
173
+ 2024-05-24 20:46:58,322 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
174
+ 2024-05-24 20:47:06,482 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
175
+ 2024-05-24 20:47:07,830 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
176
+ 2024-05-24 20:47:07,830 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
177
+ 2024-05-24 20:47:10,833 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
178
+ 2024-05-24 20:47:10,835 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
179
+ 2024-05-24 20:47:10,839 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
180
+ 2024-05-24 20:47:14,002 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
181
+ 2024-05-24 20:48:02,460 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
182
+ 2024-05-24 20:48:02,460 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
183
+ 2024-05-24 20:48:05,731 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
184
+ 2024-05-24 20:48:05,733 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
185
+ 2024-05-24 20:48:05,734 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
186
+ 2024-05-24 20:48:08,287 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
187
+ 2024-05-24 20:48:08,714 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
188
+ 2024-05-24 20:48:08,714 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
189
+ 2024-05-24 20:48:10,290 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
190
+ 2024-05-24 20:48:10,293 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
191
+ 2024-05-24 20:48:10,294 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
192
+ 2024-05-24 20:48:13,841 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
193
+ 2024-05-24 20:48:14,091 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
194
+ 2024-05-24 20:48:14,092 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
195
+ 2024-05-24 20:48:16,125 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
196
+ 2024-05-24 20:48:40,373 INFO MainThread:34 [wandb_run.py:_config_callback():1347] config_cb None None {'return_dict': True, 'output_hidden_states': False, 'output_attentions': False, 'torchscript': False, 'torch_dtype': None, 'use_bfloat16': False, 'tf_legacy_loss': False, 'pruned_heads': {}, 'tie_word_embeddings': True, 'chunk_size_feed_forward': 0, 'is_encoder_decoder': False, 'is_decoder': False, 'cross_attention_hidden_size': None, 'add_cross_attention': False, 'tie_encoder_decoder': False, 'max_length': 20, 'min_length': 0, 'do_sample': False, 'early_stopping': False, 'num_beams': 1, 'num_beam_groups': 1, 'diversity_penalty': 0.0, 'temperature': 1.0, 'top_k': 50, 'top_p': 1.0, 'typical_p': 1.0, 'repetition_penalty': 1.0, 'length_penalty': 1.0, 'no_repeat_ngram_size': 0, 'encoder_no_repeat_ngram_size': 0, 'bad_words_ids': None, 'num_return_sequences': 1, 'output_scores': False, 'return_dict_in_generate': False, 'forced_bos_token_id': None, 'forced_eos_token_id': None, 'remove_invalid_values': False, 'exponential_decay_length_penalty': None, 'suppress_tokens': None, 'begin_suppress_tokens': None, 'architectures': ['Wav2Vec2ForPreTraining'], 'finetuning_task': None, 'id2label': {0: 'LABEL_0', 1: 'LABEL_1'}, 'label2id': {'LABEL_0': 0, 'LABEL_1': 1}, 'tokenizer_class': None, 'prefix': None, 'bos_token_id': 1, 'pad_token_id': 0, 'eos_token_id': 2, 'sep_token_id': None, 'decoder_start_token_id': None, 'task_specific_params': None, 'problem_type': None, '_name_or_path': 'facebook/wav2vec2-base', 'transformers_version': '4.39.3', 'freeze_feat_extract_train': True, 'mask_channel_length': 10, 'mask_channel_min_space': 1, 'mask_channel_other': 0.0, 'mask_channel_prob': 0.0, 'mask_channel_selection': 'static', 'mask_time_min_space': 1, 'mask_time_other': 0.0, 'mask_time_selection': 'static', 'model_type': 'wav2vec2', 'no_mask_channel_overlap': False, 'no_mask_time_overlap': False, 'num_feat_extract_layers': 7, 'hidden_size': 768, 'feat_extract_norm': 'group', 
'feat_extract_activation': 'gelu', 'conv_dim': [512, 512, 512, 512, 512, 512, 512], 'conv_stride': [5, 2, 2, 2, 2, 2, 2], 'conv_kernel': [10, 3, 3, 3, 3, 2, 2], 'conv_bias': False, 'num_conv_pos_embeddings': 128, 'num_conv_pos_embedding_groups': 16, 'num_hidden_layers': 12, 'intermediate_size': 3072, 'hidden_act': 'gelu', 'num_attention_heads': 12, 'hidden_dropout': 0.1, 'attention_dropout': 0.1, 'activation_dropout': 0.0, 'feat_proj_dropout': 0.1, 'final_dropout': 0.0, 'layerdrop': 0.0, 'layer_norm_eps': 1e-05, 'initializer_range': 0.02, 'vocab_size': 32, 'do_stable_layer_norm': False, 'use_weighted_layer_sum': False, 'apply_spec_augment': True, 'mask_time_prob': 0.05, 'mask_time_length': 10, 'mask_time_min_masks': 2, 'mask_feature_prob': 0.0, 'mask_feature_length': 10, 'mask_feature_min_masks': 0, 'num_codevectors_per_group': 320, 'num_codevector_groups': 2, 'contrastive_logits_temperature': 0.1, 'feat_quantizer_dropout': 0.0, 'num_negatives': 100, 'codevector_dim': 256, 'proj_codevector_dim': 256, 'diversity_loss_weight': 0.1, 'ctc_loss_reduction': 'sum', 'ctc_zero_infinity': False, 'add_adapter': False, 'adapter_kernel_size': 3, 'adapter_stride': 2, 'num_adapter_layers': 3, 'output_hidden_size': 768, 'adapter_attn_dim': None, 'classifier_proj_size': 256, 'tdnn_dim': [512, 512, 512, 512, 1500], 'tdnn_kernel': [5, 3, 3, 1, 1], 'tdnn_dilation': [1, 2, 3, 1, 1], 'xvector_output_dim': 512, 'output_dir': '/kaggle/working/', 'overwrite_output_dir': False, 'do_train': False, 'do_eval': True, 'do_predict': False, 'evaluation_strategy': 'steps', 'prediction_loss_only': False, 'per_device_train_batch_size': 16, 'per_device_eval_batch_size': 8, 'per_gpu_train_batch_size': None, 'per_gpu_eval_batch_size': None, 'gradient_accumulation_steps': 1, 'eval_accumulation_steps': None, 'eval_delay': 0, 'learning_rate': 0.0001, 'weight_decay': 0.005, 'adam_beta1': 0.9, 'adam_beta2': 0.999, 'adam_epsilon': 1e-08, 'max_grad_norm': 1.0, 'num_train_epochs': 30, 'max_steps': -1, 
'lr_scheduler_type': 'linear', 'lr_scheduler_kwargs': {}, 'warmup_ratio': 0.0, 'warmup_steps': 1000, 'log_level': 'passive', 'log_level_replica': 'warning', 'log_on_each_node': True, 'logging_dir': '/kaggle/working/runs/May24_20-48-13_890e8b3ca76b', 'logging_strategy': 'steps', 'logging_first_step': False, 'logging_steps': 500, 'logging_nan_inf_filter': True, 'save_strategy': 'steps', 'save_steps': 500, 'save_total_limit': 2, 'save_safetensors': True, 'save_on_each_node': False, 'save_only_model': False, 'no_cuda': False, 'use_cpu': False, 'use_mps_device': False, 'seed': 42, 'data_seed': None, 'jit_mode_eval': False, 'use_ipex': False, 'bf16': False, 'fp16': True, 'fp16_opt_level': 'O1', 'half_precision_backend': 'auto', 'bf16_full_eval': False, 'fp16_full_eval': False, 'tf32': None, 'local_rank': 0, 'ddp_backend': None, 'tpu_num_cores': None, 'tpu_metrics_debug': False, 'debug': [], 'dataloader_drop_last': False, 'eval_steps': 500, 'dataloader_num_workers': 0, 'dataloader_prefetch_factor': None, 'past_index': -1, 'run_name': '/kaggle/working/', 'disable_tqdm': False, 'remove_unused_columns': True, 'label_names': None, 'load_best_model_at_end': False, 'metric_for_best_model': None, 'greater_is_better': None, 'ignore_data_skip': False, 'fsdp': [], 'fsdp_min_num_params': 0, 'fsdp_config': {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, 'fsdp_transformer_layer_cls_to_wrap': None, 'accelerator_config': {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True}, 'deepspeed': None, 'label_smoothing_factor': 0.0, 'optim': 'adamw_torch', 'optim_args': None, 'adafactor': False, 'group_by_length': True, 'length_column_name': 'length', 'report_to': ['tensorboard', 'wandb'], 'ddp_find_unused_parameters': None, 'ddp_bucket_cap_mb': None, 'ddp_broadcast_buffers': None, 'dataloader_pin_memory': True, 'dataloader_persistent_workers': False, 'skip_memory_metrics': True, 
'use_legacy_prediction_loop': False, 'push_to_hub': True, 'resume_from_checkpoint': None, 'hub_model_id': None, 'hub_strategy': 'every_save', 'hub_token': '<HUB_TOKEN>', 'hub_private_repo': False, 'hub_always_push': False, 'gradient_checkpointing': True, 'gradient_checkpointing_kwargs': None, 'include_inputs_for_metrics': False, 'fp16_backend': 'auto', 'push_to_hub_model_id': None, 'push_to_hub_organization': None, 'push_to_hub_token': '<PUSH_TO_HUB_TOKEN>', 'mp_parameters': '', 'auto_find_batch_size': False, 'full_determinism': False, 'torchdynamo': None, 'ray_scope': 'last', 'ddp_timeout': 1800, 'torch_compile': False, 'torch_compile_backend': None, 'torch_compile_mode': None, 'dispatch_batches': None, 'split_batches': None, 'include_tokens_per_second': False, 'include_num_input_tokens_seen': False, 'neftune_noise_alpha': None, 'optim_target_modules': None}
197
+ 2024-05-24 20:48:42,631 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
198
+ 2024-05-24 20:48:42,632 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
199
+ 2024-05-24 20:49:00,381 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
200
+ 2024-05-24 20:49:00,425 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
201
+ 2024-05-24 20:49:00,425 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
202
+ 2024-05-24 20:49:03,721 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
203
+ 2024-05-24 20:49:03,725 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
204
+ 2024-05-24 20:49:03,726 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
205
+ 2024-05-24 20:49:08,089 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
206
+ 2024-05-24 20:49:08,455 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
207
+ 2024-05-24 20:49:08,456 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
208
+ 2024-05-24 20:51:52,779 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
209
+ 2024-05-24 20:51:52,781 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
210
+ 2024-05-24 20:51:52,781 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
211
+ 2024-05-24 20:51:56,593 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
212
+ 2024-05-24 20:52:22,759 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
213
+ 2024-05-24 20:52:22,759 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
214
+ 2024-05-24 20:52:49,570 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
215
+ 2024-05-24 20:52:49,572 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
216
+ 2024-05-24 20:52:49,572 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
217
+ 2024-05-24 20:52:52,107 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
wandb/run-20240524_202737-n1w0kmmv/run-n1w0kmmv.wandb ADDED
Binary file (114 kB). View file