Files changed (1)
  1. README.md +21 -8
README.md CHANGED
@@ -1,6 +1,8 @@
 ---
+language:
+- en
 license: cc-by-nc-4.0
-base_model: google/gemma-7b-it
+library_name: transformers
 tags:
 - generated_from_trainer
 - axolotl
@@ -11,15 +13,13 @@ tags:
 - gpt4
 - synthetic data
 - distillation
-model-index:
-- name: gemma-7b-openhermes
-  results: []
 datasets:
 - mlabonne/chatml-OpenHermes2.5-dpo-binarized-alpha
-language:
-- en
-library_name: transformers
+base_model: google/gemma-7b-it
 pipeline_tag: text-generation
+model-index:
+- name: gemma-7b-openhermes
+  results: []
 ---
 <!-- This model card has been generated automatically according to the information the Trainer had access to. You
 should probably proofread and complete it, then remove this comment. -->
@@ -259,4 +259,17 @@ special_tokens:
 - Tokenizers 0.15.0
 - axolotl: 0.4.0
 
-[<img src="https://raw.githubusercontent.com/OpenAccess-AI-Collective/axolotl/main/image/axolotl-badge-web.png" alt="Built with Axolotl" width="200" height="32"/>](https://github.com/OpenAccess-AI-Collective/axolotl)
+[<img src="https://raw.githubusercontent.com/OpenAccess-AI-Collective/axolotl/main/image/axolotl-badge-web.png" alt="Built with Axolotl" width="200" height="32"/>](https://github.com/OpenAccess-AI-Collective/axolotl)
+# [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)
+Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_abideen__gemma-7b-openhermes)
+
+| Metric |Value|
+|---------------------------------|----:|
+|Avg. |53.67|
+|AI2 Reasoning Challenge (25-Shot)|51.28|
+|HellaSwag (10-Shot) |71.93|
+|MMLU (5-Shot) |53.56|
+|TruthfulQA (0-shot) |47.18|
+|Winogrande (5-shot) |68.19|
+|GSM8k (5-shot) |29.87|
+
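The reordered front matter declares `library_name: transformers` and `pipeline_tag: text-generation`, so the card advertises loading through the stock transformers text-generation pipeline. A minimal sketch of that usage, assuming the repository id `abideen/gemma-7b-openhermes` (inferred from the leaderboard details link above, not stated in this diff) and that the tokenizer ships a chat template:

```python
# Minimal sketch: load the model via the standard transformers pipeline,
# as suggested by `library_name: transformers` / `pipeline_tag: text-generation`.
# The repo id is an assumption inferred from the leaderboard details URL.
from transformers import pipeline

generator = pipeline(
    "text-generation",
    model="abideen/gemma-7b-openhermes",  # assumed repo id
    device_map="auto",                    # requires accelerate
)

# Assumes the tokenizer provides a chat template (Gemma-style or ChatML).
messages = [{"role": "user", "content": "Explain DPO fine-tuning in one sentence."}]
prompt = generator.tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
print(generator(prompt, max_new_tokens=128)[0]["generated_text"])
```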
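For reference, the Avg. row in the added table is the unweighted mean of the six benchmark scores; a quick sanity check with the values copied from the table:

```python
# Sanity check: the leaderboard "Avg." is the unweighted mean of the six scores.
scores = {
    "ARC (25-shot)": 51.28,
    "HellaSwag (10-shot)": 71.93,
    "MMLU (5-shot)": 53.56,
    "TruthfulQA (0-shot)": 47.18,
    "Winogrande (5-shot)": 68.19,
    "GSM8k (5-shot)": 29.87,
}
avg = sum(scores.values()) / len(scores)
print(f"{avg:.2f}")  # 53.67, matching the Avg. row
```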