akshayballal
committed on
Commit
•
c682b8b
1
Parent(s):
0c3b810
Update README.md
Browse files
README.md
CHANGED
@@ -32,6 +32,19 @@ This model is a function calling version of [google/gemma-2-2b](https://huggingf
|
|
32 |
```python
|
33 |
from unsloth import FastLanguageModel
|
34 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
35 |
alpaca_prompt = """Below are the tools that you have access to these tools. Use them if required.
|
36 |
|
37 |
### Tools:
|
|
|
32 |
```python
|
33 |
from unsloth import FastLanguageModel
|
34 |
|
35 |
+
max_seq_length = 2048 # Choose any! We auto support RoPE Scaling internally!
|
36 |
+
dtype = None # None for auto detection. Float16 for Tesla T4, V100, Bfloat16 for Ampere+
|
37 |
+
load_in_4bit = True # Use 4bit quantization to reduce memory usage. Can be False.
|
38 |
+
|
39 |
+
model, tokenizer = FastLanguageModel.from_pretrained(
|
40 |
+
model_name = "gemma2-2b-xlam-function-calling", # YOUR MODEL YOU USED FOR TRAINING
|
41 |
+
max_seq_length = 1024,
|
42 |
+
dtype = dtype,
|
43 |
+
load_in_4bit = load_in_4bit,
|
44 |
+
)
|
45 |
+
FastLanguageModel.for_inference(model) # Enable native 2x faster inference
|
46 |
+
|
47 |
+
|
48 |
alpaca_prompt = """Below are the tools that you have access to these tools. Use them if required.
|
49 |
|
50 |
### Tools:
|