lucasrct committed
Commit 93bc1d5 · verified · 1 Parent(s): dcdb4cd

Upload folder using huggingface_hub
README.md ADDED
@@ -0,0 +1,18 @@
+ # instruction_prompt_1_model
+
+ ## Model Description
+ This model is a fine-tuned version of `EleutherAI/pythia-410m` trained on `tatsu-lab/alpaca` data.
+
+ ## Dataset Details
+ - Dataset Configuration: None
+ - Dataset Name: tatsu-lab/alpaca
+ - Prompt: {instruction}
+ {input}
+ {output}
+
+ ## Training Details
+ - Base Model: EleutherAI/pythia-410m
+ - Training Parameters:
+   - Learning Rate: 2e-05
+   - Batch Size: 1
+   - Epochs: 1
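The README names the base model and an Alpaca-style `{instruction}{input}{output}` template but does not show how to load the result. A minimal sketch, assuming a hypothetical repo id for where this folder was uploaded and plain newline joining of the template fields (neither is stated in the README):

```python
# Minimal sketch: load the fine-tuned checkpoint and build a prompt in the
# {instruction}{input}{output} template the README describes.
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "lucasrct/instruction_prompt_1_model"  # hypothetical repo id
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(repo_id)

# How the template fields were joined during training is not specified in
# the README, so newline concatenation is an assumption here.
prompt = "Summarize the following text.\n" \
         "The quick brown fox jumps over the lazy dog.\n"

inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```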
config.json ADDED
@@ -0,0 +1,32 @@
+ {
+   "_name_or_path": "EleutherAI/pythia-410m",
+   "architectures": [
+     "GPTNeoXForCausalLM"
+   ],
+   "attention_bias": true,
+   "attention_dropout": 0.0,
+   "bos_token_id": 0,
+   "classifier_dropout": 0.1,
+   "eos_token_id": 0,
+   "hidden_act": "gelu",
+   "hidden_dropout": 0.0,
+   "hidden_size": 1024,
+   "initializer_range": 0.02,
+   "intermediate_size": 4096,
+   "layer_norm_eps": 1e-05,
+   "max_position_embeddings": 2048,
+   "model_type": "gpt_neox",
+   "num_attention_heads": 16,
+   "num_hidden_layers": 24,
+   "partial_rotary_factor": 0.25,
+   "rope_scaling": null,
+   "rope_theta": 10000,
+   "rotary_emb_base": 10000,
+   "rotary_pct": 0.25,
+   "tie_word_embeddings": false,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.48.0",
+   "use_cache": true,
+   "use_parallel_residual": true,
+   "vocab_size": 50304
+ }
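config.json describes a standard 24-layer GPT-NeoX model, matching the `pythia-410m` base. A small sketch for inspecting these dimensions without downloading any weights, assuming the file from this commit is in the working directory:

```python
# Sketch: AutoConfig can read the config.json directly and resolve
# "gpt_neox" to the corresponding config class.
from transformers import AutoConfig

config = AutoConfig.from_pretrained("config.json")
print(config.model_type)           # gpt_neox
print(config.num_hidden_layers)    # 24
print(config.hidden_size)          # 1024
print(config.num_attention_heads)  # 16
print(config.torch_dtype)          # torch.bfloat16
```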
generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 0,
+   "eos_token_id": 0,
+   "transformers_version": "4.48.0"
+ }
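The generation config pins only `bos_token_id` and `eos_token_id` to token 0, so decoding falls back to the transformers defaults (greedy) unless options are passed explicitly. A self-contained sketch, reusing the hypothetical repo id from above:

```python
# Sketch: generate with settings mirroring generation_config.json, where
# only bos/eos are pinned to token id 0; everything else is default.
from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig

repo_id = "lucasrct/instruction_prompt_1_model"  # hypothetical repo id
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(repo_id)

gen_config = GenerationConfig(bos_token_id=0, eos_token_id=0)

inputs = tokenizer("Write a haiku about autumn.", return_tensors="pt")
outputs = model.generate(**inputs, generation_config=gen_config, max_new_tokens=32)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```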
gpu_memory_log.txt ADDED
@@ -0,0 +1,2 @@
+ Current GPU Memory allocated: 2.37 GB
+ Max GPU Memory allocated: 4.06 GB
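These two figures match what PyTorch's CUDA allocator statistics report. A sketch of how such a log can be produced (assumes a CUDA device is available):

```python
# Sketch: current and peak GPU allocation from PyTorch's allocator stats,
# formatted like the log above.
import torch

gb = 1024 ** 3
current = torch.cuda.memory_allocated() / gb
peak = torch.cuda.max_memory_allocated() / gb
print(f"Current GPU Memory allocated: {current:.2f} GB")
print(f"Max GPU Memory allocated: {peak:.2f} GB")
```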
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:55d580821d1e4b25d698f0df97d8121ad5d91d8cb4986ed9ff5552d33a95e700
+ size 810702192
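Only a Git LFS pointer is stored here; the `size` of 810,702,192 bytes is consistent with roughly 405M parameters at 2 bytes each in bfloat16. A sketch for fetching the real file, again with the hypothetical repo id:

```python
# Sketch: resolve the LFS pointer by downloading the actual ~810 MB
# safetensors file from the Hub.
from huggingface_hub import hf_hub_download

path = hf_hub_download(
    repo_id="lucasrct/instruction_prompt_1_model",  # hypothetical repo id
    filename="model.safetensors",
)
print(path)
```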
optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3197a0688cd8bbdbdfc64db8f67c7b85a8fbc6a0ae4d2760ce54dcdb6154d4dd
+ size 1621580026
ram_usage_log.txt ADDED
@@ -0,0 +1,4 @@
+ Total RAM: 186.68 GB
+ Available RAM: 144.00 GB
+ Used RAM: 38.55 GB
+ RAM Usage Percentage: 22.9%
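A sketch of how these host-memory figures can be collected, assuming `psutil` is installed:

```python
# Sketch: system RAM figures via psutil, formatted like the log above.
import psutil

gb = 1024 ** 3
mem = psutil.virtual_memory()
print(f"Total RAM: {mem.total / gb:.2f} GB")
print(f"Available RAM: {mem.available / gb:.2f} GB")
print(f"Used RAM: {mem.used / gb:.2f} GB")
print(f"RAM Usage Percentage: {mem.percent}%")
```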
rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:386fcc8cc1089aade9450d86fb239ea3483f455fd2d78d8378645feecfec9d69
+ size 14244
scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:57e79184a62b068ce886e3cf6b77d680712c8fe43cfbc9fc53fd4467e09009c5
+ size 1064
trainer_state.json ADDED
@@ -0,0 +1,110 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 0.9999786329351937,
+   "eval_steps": 500,
+   "global_step": 11700,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.08546825922523023,
+       "grad_norm": 9.1875,
+       "learning_rate": 1.910714285714286e-05,
+       "loss": 1.1855,
+       "step": 1000
+     },
+     {
+       "epoch": 0.17093651845046046,
+       "grad_norm": 10.6875,
+       "learning_rate": 1.7321428571428572e-05,
+       "loss": 0.2975,
+       "step": 2000
+     },
+     {
+       "epoch": 0.2564047776756907,
+       "grad_norm": 7.40625,
+       "learning_rate": 1.553571428571429e-05,
+       "loss": 0.3029,
+       "step": 3000
+     },
+     {
+       "epoch": 0.3418730369009209,
+       "grad_norm": 7.40625,
+       "learning_rate": 1.375e-05,
+       "loss": 0.2983,
+       "step": 4000
+     },
+     {
+       "epoch": 0.4273412961261511,
+       "grad_norm": 11.1875,
+       "learning_rate": 1.1964285714285716e-05,
+       "loss": 0.2896,
+       "step": 5000
+     },
+     {
+       "epoch": 0.5128095553513814,
+       "grad_norm": 5.375,
+       "learning_rate": 1.0178571428571429e-05,
+       "loss": 0.2888,
+       "step": 6000
+     },
+     {
+       "epoch": 0.5982778145766117,
+       "grad_norm": 2.890625,
+       "learning_rate": 8.392857142857144e-06,
+       "loss": 0.2868,
+       "step": 7000
+     },
+     {
+       "epoch": 0.6837460738018418,
+       "grad_norm": 5.1875,
+       "learning_rate": 6.607142857142858e-06,
+       "loss": 0.2861,
+       "step": 8000
+     },
+     {
+       "epoch": 0.7692143330270721,
+       "grad_norm": 3.9375,
+       "learning_rate": 4.821428571428572e-06,
+       "loss": 0.2897,
+       "step": 9000
+     },
+     {
+       "epoch": 0.8546825922523023,
+       "grad_norm": 4.875,
+       "learning_rate": 3.0357142857142856e-06,
+       "loss": 0.2873,
+       "step": 10000
+     },
+     {
+       "epoch": 0.9401508514775325,
+       "grad_norm": 6.21875,
+       "learning_rate": 1.25e-06,
+       "loss": 0.279,
+       "step": 11000
+     }
+   ],
+   "logging_steps": 1000,
+   "max_steps": 11700,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 1,
+   "save_steps": 1000,
+   "stateful_callbacks": {
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": true
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 5.0868950925312e+16,
+   "train_batch_size": 1,
+   "trial_name": null,
+   "trial_params": null
+ }
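The `log_history` shows the loss falling from 1.19 at step 1000 to about 0.28 by step 11000 while the learning rate decays roughly linearly after warmup. A sketch for printing that curve from the file:

```python
# Sketch: read the Trainer log history recorded in trainer_state.json and
# print one line per logged step.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    print(f"step {entry['step']:>6}  "
          f"lr {entry['learning_rate']:.2e}  "
          f"loss {entry['loss']:.4f}")
```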
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:08a68c4d2d72e04ca5f16ac17ac4550e4d4b6d4c97f37f634d5b930d89af2f1a
+ size 5304
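Together, `optimizer.pt`, `scheduler.pt`, `rng_state.pth`, `trainer_state.json`, and `training_args.bin` are the files `transformers.Trainer` writes into a checkpoint (the ~1.6 GB optimizer state is roughly twice the model size, as expected for Adam's two moment buffers). A one-line sketch of resuming from such a folder, assuming `trainer` is an already-configured `Trainer` instance and the path is hypothetical:

```python
# Sketch: resume an interrupted run from a Trainer checkpoint folder.
# `trainer` must already be constructed with the same model and arguments.
trainer.train(resume_from_checkpoint="checkpoints/instruction_prompt_1_model")
```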
training_config.yaml ADDED
@@ -0,0 +1,9 @@
+ model_name: "EleutherAI/pythia-410m"
+ learning_rate: 2.0e-5
+ batch_size: 1
+ num_epochs: 1
+ warmup_steps: 500
+ logging_steps: 1000
+ # eval_steps: 2000
+ save_steps: 1000
+ gradient_accumulation_steps: 4
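The YAML mirrors the README's hyperparameters and adds `gradient_accumulation_steps: 4`, giving an effective batch size of 4. A sketch of how such a file might be mapped onto `TrainingArguments`; the field-to-parameter mapping and the `output_dir` are assumptions, not taken from this repo:

```python
# Sketch: load training_config.yaml and map its fields onto
# transformers.TrainingArguments. The mapping is an assumption about how
# the YAML was consumed; output_dir is not specified in the file.
import yaml
from transformers import TrainingArguments

with open("training_config.yaml") as f:
    cfg = yaml.safe_load(f)

args = TrainingArguments(
    output_dir="out",  # hypothetical
    learning_rate=cfg["learning_rate"],
    per_device_train_batch_size=cfg["batch_size"],
    num_train_epochs=cfg["num_epochs"],
    warmup_steps=cfg["warmup_steps"],
    logging_steps=cfg["logging_steps"],
    save_steps=cfg["save_steps"],
    gradient_accumulation_steps=cfg["gradient_accumulation_steps"],
)
```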