MHGanainy/mgpt-lora-multi-shared-1024
Browse files
- README.md +2 -0
- all_results.json +10 -10
- eval_results.json +5 -5
- train_results.json +5 -5
- trainer_state.json +0 -0
README.md
CHANGED
@@ -15,6 +15,8 @@ should probably proofread and complete it, then remove this comment. -->
 # mgpt-lora-multi-shared-1024
 
 This model is a fine-tuned version of [ai-forever/mGPT](https://huggingface.co/ai-forever/mGPT) on an unknown dataset.
+It achieves the following results on the evaluation set:
+- Loss: 1.7504
 
 ## Model description
 
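Since the README describes this repository as a LoRA fine-tune of ai-forever/mGPT, a minimal loading sketch follows. It assumes the repo hosts a standard PEFT adapter; the prompt and generation settings are illustrative and not taken from this commit.

```python
# Sketch only: assumes MHGanainy/mgpt-lora-multi-shared-1024 contains a standard
# PEFT LoRA adapter for the ai-forever/mGPT base model (not confirmed by this commit).
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained("ai-forever/mGPT")
tokenizer = AutoTokenizer.from_pretrained("ai-forever/mGPT")

# Attach the LoRA weights from this repository on top of the base model.
model = PeftModel.from_pretrained(base, "MHGanainy/mgpt-lora-multi-shared-1024")
model.eval()

inputs = tokenizer("Example prompt", return_tensors="pt")
with torch.no_grad():
    output_ids = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```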
all_results.json
CHANGED
@@ -1,13 +1,13 @@
 {
     "epoch": 1.0,
-    "eval_loss":
-    "eval_runtime":
-    "eval_samples_per_second":
-    "eval_steps_per_second": 9.
-    "perplexity":
-    "total_flos":
-    "train_loss":
-    "train_runtime":
-    "train_samples_per_second":
-    "train_steps_per_second":
+    "eval_loss": 1.750352382659912,
+    "eval_runtime": 1628.7902,
+    "eval_samples_per_second": 76.532,
+    "eval_steps_per_second": 9.567,
+    "perplexity": 5.7566308555301,
+    "total_flos": 7.631778497299481e+18,
+    "train_loss": 2.1485319636500972,
+    "train_runtime": 14119.5859,
+    "train_samples_per_second": 35.382,
+    "train_steps_per_second": 4.423
 }
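The updated numbers are internally consistent: the "perplexity" field appears to be exp(eval_loss). A quick check (illustrative, not part of the commit):

```python
import math

eval_loss = 1.750352382659912
print(math.exp(eval_loss))  # ≈ 5.7566, matching the reported "perplexity" of 5.7566308555301
```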
eval_results.json
CHANGED
@@ -1,8 +1,8 @@
 {
     "epoch": 1.0,
-    "eval_loss":
-    "eval_runtime":
-    "eval_samples_per_second":
-    "eval_steps_per_second": 9.
-    "perplexity":
+    "eval_loss": 1.750352382659912,
+    "eval_runtime": 1628.7902,
+    "eval_samples_per_second": 76.532,
+    "eval_steps_per_second": 9.567,
+    "perplexity": 5.7566308555301
 }
train_results.json
CHANGED
@@ -1,8 +1,8 @@
 {
     "epoch": 1.0,
-    "total_flos":
-    "train_loss":
-    "train_runtime":
-    "train_samples_per_second":
-    "train_steps_per_second":
+    "total_flos": 7.631778497299481e+18,
+    "train_loss": 2.1485319636500972,
+    "train_runtime": 14119.5859,
+    "train_samples_per_second": 35.382,
+    "train_steps_per_second": 4.423
 }
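As a rough sanity check (assuming the throughput figures are averages over the whole run), the reported training rates imply about 8 samples per optimization step and roughly 62k steps over the single epoch:

```python
# Values taken from train_results.json above; the derived figures are approximations.
train_runtime = 14119.5859            # seconds
train_samples_per_second = 35.382
train_steps_per_second = 4.423

print(train_samples_per_second / train_steps_per_second)  # ≈ 8.0 samples per step
print(train_runtime * train_steps_per_second)             # ≈ 62,451 optimization steps
print(train_runtime * train_samples_per_second)           # ≈ 499,579 samples seen in the epoch
```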
trainer_state.json
CHANGED
The diff for this file is too large to render.
See raw diff