Update README.md
README.md CHANGED
@@ -1,5 +1,21 @@
-
 ```markdown
+---
+tags:
+- text-generation
+- transformers
+- opt-6.7b
+- lora
+license: mit
+datasets:
+- wikipedia
+- bookcorpus
+- openwebtext
+- conversational
+metrics:
+- perplexity
+- accuracy
+---
+
 # babelAI/opt-6.7b-lora
 
 ## Model Description
@@ -39,6 +55,7 @@ Here is an example of how to load and use the `babelAI/opt-6.7b-lora` model:
 import torch
 from transformers import AutoModelForCausalLM, AutoTokenizer
 from peft import PeftModel, PeftConfig
+from transformers import BitsAndBytesConfig
 
 # Define the model ID
 peft_model_id = "babelAI/opt-6.7b-lora"
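
The hunk above ends where the diff truncates the README's usage example; the newly added `BitsAndBytesConfig` import suggests the base model is meant to be loaded with bitsandbytes quantization before the LoRA adapter is attached. A minimal sketch of how the rest of the loading code presumably continues (the 8-bit setting, prompt, and generation arguments are assumptions, not taken from the README):

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
from peft import PeftModel, PeftConfig

peft_model_id = "babelAI/opt-6.7b-lora"

# The adapter config records which base checkpoint the LoRA weights were trained on.
config = PeftConfig.from_pretrained(peft_model_id)

# Load the base model with 8-bit weights to reduce memory use (assumed; this is
# the usual reason a model card imports BitsAndBytesConfig).
base_model = AutoModelForCausalLM.from_pretrained(
    config.base_model_name_or_path,
    quantization_config=BitsAndBytesConfig(load_in_8bit=True),
    device_map="auto",
)

# Attach the LoRA adapter on top of the quantized base model.
model = PeftModel.from_pretrained(base_model, peft_model_id)
tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)

# Generate a short completion (prompt and generation settings are placeholders).
device = next(model.parameters()).device
inputs = tokenizer("The quick brown fox", return_tensors="pt").to(device)
with torch.no_grad():
    output_ids = model.generate(**inputs, max_new_tokens=40, do_sample=False)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```

Reading `config.base_model_name_or_path` from the adapter config avoids hard-coding the base checkpoint, and `load_in_8bit=True` keeps the 6.7B base model within a single GPU's memory.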
@@ -117,3 +134,10 @@ This model is licensed under the MIT License.
 
 For more information or questions, please contact the babelAI team at [babel.ai.dub@gmail.com].
 ```
+
+### Explanation:
+
+- **tags**: Keywords related to the model.
+- **license**: The license under which the model is distributed.
+- **datasets**: Datasets used to train the model.
+- **metrics**: Metrics used to evaluate the model.