bala3040 committed to bala3040/qa_bot2
Commit f5fe73f · verified · Parent: b03ab60

README.md CHANGED
@@ -16,7 +16,7 @@ should probably proofread and complete it, then remove this comment. -->
 
  This model is a fine-tuned version of [TheBloke/Mistral-7B-Instruct-v0.2-GPTQ](https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.2-GPTQ) on the None dataset.
  It achieves the following results on the evaluation set:
- - Loss: 1.5351
+ - Loss: 1.1865
 
  ## Model description
 
@@ -41,7 +41,7 @@ The following hyperparameters were used during training:
  - seed: 42
  - gradient_accumulation_steps: 4
  - total_train_batch_size: 4
- - optimizer: Use paged_adamw_8bit with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments
+ - optimizer: Use OptimizerNames.PAGED_ADAMW_8BIT with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments
  - lr_scheduler_type: linear
  - lr_scheduler_warmup_steps: 2
  - num_epochs: 2
@@ -51,14 +51,14 @@ The following hyperparameters were used during training:
 
  | Training Loss | Epoch | Step | Validation Loss |
  |:-------------:|:-----:|:----:|:---------------:|
- | 1.8113 | 1.0 | 75 | 1.5410 |
- | 1.3656 | 2.0 | 150 | 1.5351 |
+ | 5.9219 | 1.0 | 75 | 1.2440 |
+ | 3.9103 | 2.0 | 150 | 1.1865 |
 
 
  ### Framework versions
 
- - PEFT 0.13.2
- - Transformers 4.46.2
- - Pytorch 2.5.1+cu121
- - Datasets 3.1.0
- - Tokenizers 0.20.3
+ - PEFT 0.14.0
+ - Transformers 4.47.1
+ - Pytorch 2.5.1+cu124
+ - Datasets 3.2.0
+ - Tokenizers 0.21.0
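
The hunks above bump the reported losses and library versions. As a reading aid, a minimal sketch of how the card's logged hyperparameters might map onto `transformers.TrainingArguments`; this is reconstructed from the README, not taken from the author's script, and values outside the visible hunks (e.g. the learning rate) are omitted.

```python
# Sketch only: TrainingArguments matching the card's hyperparameter list.
# per_device_train_batch_size=1 is inferred from total_train_batch_size (4)
# divided by gradient_accumulation_steps (4); values not shown in this diff
# (learning rate, eval batch size) are left out.
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="qa_bot2",           # hypothetical output path
    seed=42,
    per_device_train_batch_size=1,  # inferred, see comment above
    gradient_accumulation_steps=4,  # effective batch size: 1 * 4 = 4
    optim="paged_adamw_8bit",       # logged as OptimizerNames.PAGED_ADAMW_8BIT
    lr_scheduler_type="linear",
    warmup_steps=2,
    num_train_epochs=2,
)
```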
adapter_config.json CHANGED
@@ -3,6 +3,8 @@
  "auto_mapping": null,
  "base_model_name_or_path": "TheBloke/Mistral-7B-Instruct-v0.2-GPTQ",
  "bias": "none",
+ "eva_config": null,
+ "exclude_modules": null,
  "fan_in_fan_out": false,
  "inference_mode": true,
  "init_lora_weights": true,
@@ -11,6 +13,7 @@
  "layers_to_transform": null,
  "loftq_config": {},
  "lora_alpha": 32,
+ "lora_bias": false,
  "lora_dropout": 0.05,
  "megatron_config": null,
  "megatron_core": "megatron.core",
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:adc6ab088a33e16b9cda8ea47ddb62bf3cafdfb95fdaa1720f37e9c3ea485f53
+oid sha256:e899be230ddf2ea1d3807304cf1df69a9f2a2f5c1e0f56fe1a7ed12bbe49c0a3
 size 8397056
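
The safetensors change is just a new git-lfs pointer for the retrained adapter weights (same size, new hash). A minimal usage sketch, assuming the standard `transformers` + `peft` loading path for a GPTQ base model (requires a GPTQ backend such as `auto-gptq` or `gptqmodel` to be installed):

```python
# Sketch only: load the GPTQ base model and attach this repo's updated adapter.
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "TheBloke/Mistral-7B-Instruct-v0.2-GPTQ"
adapter_id = "bala3040/qa_bot2"

tokenizer = AutoTokenizer.from_pretrained(base_id)
base = AutoModelForCausalLM.from_pretrained(base_id, device_map="auto")
model = PeftModel.from_pretrained(base, adapter_id)  # fetches adapter_model.safetensors

# Mistral-Instruct prompt format; the question itself is illustrative.
prompt = "[INST] Summarize what a QA bot does. [/INST]"
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
out = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```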
runs/Jan31_12-18-54_a96bef561d82/events.out.tfevents.1738325943.a96bef561d82.354.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:34cac1a24414b572ec9a29606d9f66b99ba0a24b299f9cc83e2093f37ced42a9
+size 7035
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:87a84ebebfb36630aa3d0671ad77a89184bcbdeef613a995c91c2f4afce756c7
+oid sha256:8740a94d3634e777fa97b62f3c3e1737f5bf7869cb1ea17fe772436c0435d839
 size 5304
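
`training_args.bin` is likewise an LFS pointer, here to a pickled `TrainingArguments` object. To check the README's hyperparameter list against the actual file, one possible inspection sketch; the explicit `weights_only=False` is needed on recent PyTorch and is appropriate only for a repository you trust:

```python
# Sketch only: training_args.bin is a pickled transformers.TrainingArguments.
# The diff above shows just the git-lfs pointer, so download the real payload
# first. weights_only=False executes pickle code, hence the trust caveat.
import torch
from huggingface_hub import hf_hub_download

path = hf_hub_download(repo_id="bala3040/qa_bot2", filename="training_args.bin")
train_args = torch.load(path, weights_only=False)
print(train_args.optim, train_args.lr_scheduler_type, train_args.num_train_epochs)
```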