chathuru committed
Commit 8394673 · 1 Parent(s): e07014b

CuATR-distilbert-LoRA

README.md CHANGED
@@ -18,9 +18,9 @@ should probably proofread and complete it, then remove this comment. -->
 
 This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on an unknown dataset.
 It achieves the following results on the evaluation set:
-- Loss: 0.6890
-- Accuracy: 0.5652
-- F1: 0.7222
+- Loss: 0.6930
+- Accuracy: 0.4348
+- F1: 0.0
 
 ## Model description
 
@@ -52,11 +52,11 @@ The following hyperparameters were used during training:
 
 ### Training results
 
-| Training Loss | Epoch | Step | Validation Loss | Accuracy | F1     |
-|:-------------:|:-----:|:----:|:---------------:|:--------:|:------:|
-| 0.7122        | 0.67  | 1    | 0.6891          | 0.5652   | 0.7222 |
-| 0.7112        | 2.0   | 3    | 0.6890          | 0.5652   | 0.7222 |
-| 0.6955        | 2.67  | 4    | 0.6890          | 0.5652   | 0.7222 |
+| Training Loss | Epoch | Step | Validation Loss | Accuracy | F1  |
+|:-------------:|:-----:|:----:|:---------------:|:--------:|:---:|
+| 0.6988        | 0.67  | 1    | 0.6933          | 0.4348   | 0.0 |
+| 0.6919        | 2.0   | 3    | 0.6931          | 0.4348   | 0.0 |
+| 0.708         | 2.67  | 4    | 0.6930          | 0.4348   | 0.0 |
 
 
 ### Framework versions
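Not part of the commit: a minimal sketch of how the adapter documented in this README could be loaded for inference with `transformers` and `peft`. The repository id is inferred from the commit title, and `num_labels` is an assumption; neither is stated in the diff.

```python
# Sketch only: loading the fine-tuned LoRA adapter on top of the base model.
# The repo id is inferred from the commit title and may not be exact.
from transformers import AutoModelForTokenClassification, AutoTokenizer
from peft import PeftModel

base_model = AutoModelForTokenClassification.from_pretrained(
    "distilbert-base-uncased",
    num_labels=2,  # assumption; the commit does not state the label count
)
model = PeftModel.from_pretrained(base_model, "chathuru/CuATR-distilbert-LoRA")
tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")

inputs = tokenizer("Example input sentence.", return_tensors="pt")
outputs = model(**inputs)    # token-classification logits, one row per input token
print(outputs.logits.shape)
```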
adapter_config.json CHANGED
@@ -8,17 +8,17 @@
   "init_lora_weights": true,
   "layers_pattern": null,
   "layers_to_transform": null,
-  "lora_alpha": 16,
+  "lora_alpha": 32,
   "lora_dropout": 0.1,
   "modules_to_save": null,
   "peft_type": "LORA",
-  "r": 16,
+  "r": 32,
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "v_lin",
+    "q_lin",
     "k_lin",
-    "q_lin"
+    "v_lin"
   ],
   "task_type": "TOKEN_CLS"
 }
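For reference, a minimal sketch (not part of the commit) of how the updated adapter_config.json values map onto a PEFT `LoraConfig`. The base model call and `num_labels` are illustrative assumptions. Doubling the rank from 16 to 32 also roughly doubles the adapter checkpoint size, which is visible in the adapter_model.safetensors change below.

```python
# Sketch only: mirrors the LoRA settings from the updated adapter_config.json.
# The base model and num_labels are illustrative assumptions, not part of the commit.
from peft import LoraConfig, TaskType, get_peft_model
from transformers import AutoModelForTokenClassification

base_model = AutoModelForTokenClassification.from_pretrained(
    "distilbert-base-uncased",
    num_labels=2,  # assumption; the commit does not state the label count
)

lora_config = LoraConfig(
    task_type=TaskType.TOKEN_CLS,
    r=32,                                        # raised from 16 in this commit
    lora_alpha=32,                               # raised from 16 in this commit
    lora_dropout=0.1,
    target_modules=["q_lin", "k_lin", "v_lin"],  # DistilBERT attention projections
)

model = get_peft_model(base_model, lora_config)
model.print_trainable_parameters()  # trainable parameter count grows with r
```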
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7b42e653456701e10c61c62e1d9ee2a365ae65a2669fde89dfc4d8dd35ae27cc
-size 1774720
+oid sha256:cb1e15b0cdf0fbf5843b9a4fb29ba339fba175c5524f64d493856c454c480b60
+size 3544216
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c85194ab166096f43ad5356cbba09fbe00e9f9707ef5784691ae1932410d2900
+oid sha256:c26d240ce75d08d702c5cfea7817f2d4881a2de4521774ac22c7368cbe5c22cb
 size 4600