Update README.md
README.md CHANGED
```diff
@@ -44,42 +44,3 @@ It is suitable for fine-tuning on tasks such as:
 ### Recommendations
 - Users should ensure that applications using this model respect cultural and religious sensitivities.
 - Results should be verified by domain experts for critical applications.
-
-## How to Get Started with the Model
-
-```python
-from transformers import AutoModelForCausalLM, AutoTokenizer
-
-# Load the tokenizer and model
-tokenizer = AutoTokenizer.from_pretrained("Ellbendls/Qwen-2.5-3b-Quran")
-model = AutoModelForCausalLM.from_pretrained("Ellbendls/Qwen-2.5-3b-Quran")
-
-# Move the model to GPU
-model.to("cuda")
-
-# Define the input message
-messages = [
-    {
-        "role": "user",
-        "content": "Tafsirkan ayat ini اِهْدِنَا الصِّرَاطَ الْمُسْتَقِيْمَۙ"
-    }
-]
-
-# Generate the prompt using the tokenizer
-prompt = tokenizer.apply_chat_template(messages, tokenize=False,
-                                       add_generation_prompt=True)
-
-# Tokenize the prompt and move inputs to GPU
-inputs = tokenizer(prompt, return_tensors='pt', padding=True,
-                   truncation=True).to("cuda")
-
-# Generate the output using the model
-outputs = model.generate(**inputs, max_length=150,
-                         num_return_sequences=1)
-
-# Decode the output
-text = tokenizer.decode(outputs[0], skip_special_tokens=True)
-
-# Print the result
-print(text.split("assistant")[1])
-```
```