Create README.md
README.md ADDED
---
license: apache-2.0
tags:
- finetuned
pipeline_tag: text-generation
inference: true
widget:
- messages:
  - role: user
    content: What is your favorite condiment?
extra_gated_description: If you want to learn more about how we process your personal data, please read our <a href="https://mistral.ai/terms/">Privacy Policy</a>.
---

## Use the code below to download and run the model

```py
# pip install -U transformers accelerate torch

import torch
from transformers import pipeline, set_seed

set_seed(42)  # optional: make sampling reproducible

model_path = "<this-model-repo-id-or-local-path>"  # replace with this repo's model id
query = "What is your favorite condiment?"

# Load the model as a chat-capable text-generation pipeline on the GPU
pipe = pipeline("text-generation", model=model_path, torch_dtype=torch.bfloat16, device_map="cuda")

# Chat-style input: a list of {role, content} messages
messages = [{"role": "user", "content": query}]
outputs = pipe(messages, max_new_tokens=1000, do_sample=True,
               temperature=0.71, top_k=50, top_p=0.92, repetition_penalty=1.0)

# The pipeline returns the full chat transcript; the last message is the reply
print(outputs[0]["generated_text"][-1]["content"])
```
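
For reference, the same generation can be done at a lower level by loading the tokenizer and model separately. This is only a sketch under the same assumptions as above (the `model_path` placeholder is hypothetical and must be replaced with the real repo id); it applies the model's chat template by hand and decodes only the newly generated tokens:

```py
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_path = "<this-model-repo-id-or-local-path>"  # hypothetical placeholder, as above
tokenizer = AutoTokenizer.from_pretrained(model_path)
model = AutoModelForCausalLM.from_pretrained(model_path, torch_dtype=torch.bfloat16, device_map="cuda")

messages = [{"role": "user", "content": "What is your favorite condiment?"}]
# Render the chat into the model's prompt format and move it to the model's device
inputs = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt").to(model.device)

# Same sampling settings as the pipeline example above
generated = model.generate(inputs, max_new_tokens=1000, do_sample=True,
                           temperature=0.71, top_k=50, top_p=0.92)

# Drop the prompt tokens and decode only the model's reply
print(tokenizer.decode(generated[0][inputs.shape[-1]:], skip_special_tokens=True))
```

The pipeline in the previous block performs these same steps internally, so either approach should produce equivalent output.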